Zack Saadioui
8/24/2024
First, install the dependencies. Note that `langchain-community` is needed alongside langchain 0.2, since the Hugging Face integrations live in that package:

```bash
pip install langchain==0.2 langchain-community transformers==4.33.2 torch==2.0.1 faiss-cpu sentence-transformers
```
Then import the LangChain wrapper and the Transformers components:

```python
from langchain_community.llms import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
```
Load the tokenizer and model, then build a standard text-generation pipeline:

```python
model_name = "meta-llama/Llama-2-13b-chat-hf"  # Adjust model_name based on GPU capacity
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
generation_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
```
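For a checkpoint this size, GPU memory is often the limiting factor. As a minimal sketch of a lower-memory load, assuming the `accelerate` package is also installed (the `torch_dtype` and `device_map` arguments here are optional tweaks, not part of the setup above):

```python
import torch

# Load the weights in half precision and let accelerate spread layers
# across the available devices; requires `pip install accelerate`.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)
```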
Wrap the pipeline so LangChain can treat it as an LLM:

```python
llm = HuggingFacePipeline(pipeline=generation_pipeline)
```
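The final snippet below assumes a `chain` object has already been built on top of this `llm`. A minimal sketch using `LLMChain` with a `PromptTemplate`; the travel-assistant wording of the template is a hypothetical example, only the `input_text` variable name is dictated by the call that follows:

```python
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Hypothetical prompt; the only requirement imposed by the snippet below
# is that the template expose an `input_text` variable for chain.run().
prompt = PromptTemplate(
    input_variables=["input_text"],
    template="You are a helpful travel assistant. {input_text}",
)
chain = LLMChain(llm=llm, prompt=prompt)
```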
Finally, run the chain and print the model's answer:

```python
response = chain.run(input_text="What are some must-visit places in Paris?")
print(response)
```