Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,8 +1,8 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM
-from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
-from llama_index.llms import HuggingFaceLLM
-from llama_index.embeddings import HuggingFaceEmbedding
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
+from llama_index.llms.huggingface import HuggingFaceLLM
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 import os
 
 # Load DeepSeek-R1
@@ -15,7 +15,14 @@ indicbart_model = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/IndicBART")
 
 # Initialize LlamaIndex components
 embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
-
+
+llm = HuggingFaceLLM(
+    context_window=4096,
+    max_new_tokens=512,
+    model="deepseek-ai/DeepSeek-R1",
+    tokenizer_name="deepseek-ai/DeepSeek-R1"
+)
+
 service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
 
 # Build index from documents in 'data' directory
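A note on the updated snippet: in current releases of llama-index-llms-huggingface, HuggingFaceLLM takes a checkpoint id as a string through model_name= (the model= keyword expects an already-loaded transformers model), and from llama-index 0.11 onward ServiceContext has been removed in favor of the global Settings object. The following is a minimal sketch only, assuming llama-index >= 0.11 with the HuggingFace LLM and embedding integration packages installed, of how the updated block could be written against that newer API:

# A sketch only: assumes llama-index >= 0.11 plus the
# llama-index-llms-huggingface and llama-index-embeddings-huggingface packages.
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Embedding model used for indexing and retrieval
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Pass the checkpoint id through model_name/tokenizer_name (strings);
# the model=/tokenizer= keywords expect already-loaded objects.
llm = HuggingFaceLLM(
    context_window=4096,
    max_new_tokens=512,
    model_name="deepseek-ai/DeepSeek-R1",
    tokenizer_name="deepseek-ai/DeepSeek-R1",
)

# Settings replaces ServiceContext in llama-index 0.11+
Settings.llm = llm
Settings.embed_model = embed_model

# Build the index from documents in the 'data' directory (uses Settings implicitly)
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)

Note that HuggingFaceLLM loads the weights locally; the full deepseek-ai/DeepSeek-R1 checkpoint is far larger than typical Space hardware allows, so in practice a distilled variant (for example one of the DeepSeek-R1-Distill checkpoints) or a hosted inference endpoint may be needed, which could also be relevant to the runtime error shown above.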