Zack Saadioui
8/26/2024
1
2
bash
pip install fastapi uvicorn llama-index
1
2
3
4
5
6
7
/my_fastapi_llamaindex_app
├── app
│ ├── main.py
│ ├── llama_index.py
│ └── requirements.txt
└── data
└── source_files
1
main.py
1
app
1
2
bash
uvicorn app.main:app --reload
1
http://127.0.0.1:8000
1
http://127.0.0.1:8000/docs
1
llama_index.py
1 2
def query(self, input_query):
    """Run a query against the wrapped index and return its result.

    Thin delegation: forwards ``input_query`` unchanged to ``self.index.query``.
    """
    # assumes self.index is a llama-index index object — TODO confirm against class __init__
    result = self.index.query(input_query)
    return result
1
MyLlamaIndex
1 2 3 4 5 6 7 8 9 10 11
@app.get("/llama-query")
async def llama_query(query: str):
    """Query the LlamaIndex instance and return the result with metadata.

    Args:
        query: the free-text query string, supplied as a URL query parameter.

    Returns:
        A JSON-serializable dict with the original query, the raw result,
        and metadata (result length and measured query time in seconds).
    """
    # Local import keeps the endpoint self-contained; stdlib only.
    import time

    start = time.perf_counter()
    result = llama_index.query(query)
    elapsed = time.perf_counter() - start
    return {
        "query": query,
        "result": result,
        # NOTE(review): len(result) assumes the query result supports len() —
        # confirm against the llama-index response type.
        "metadata": {
            "total_results": len(result),
            "query_time": elapsed,  # was the placeholder "insert_time_here"
        },
    }
1
2
3
fastapi
uvicorn
llama-index
1
Dockerfile
1
2
3
4
5
6
7
8
9
10
11
12
13
dockerfile
# Start from the official Python image
FROM python:3.9
# Set the working directory
WORKDIR /app
# Copy dependencies first so Docker layer caching skips reinstalls
# when only application code changes
COPY ./requirements.txt .
# --no-cache-dir keeps the image smaller by not storing pip's download cache
RUN pip install --no-cache-dir -r requirements.txt
# Copy the rest of the application
COPY . .
# Expose the port and run the application
EXPOSE 8000
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
1
2
bash
docker build -t my_fastapi_app .
1
2
bash
docker run -d -p 8000:8000 my_fastapi_app
Copyright © Arsturn 2025