Update model.py
model.py CHANGED
@@ -979,7 +979,7 @@ async def query_document_info(niche_cases, query_word, alternative_query_word, s
     2. RAG with semantic search and LLM (general, flexible, cost-optimized).
     """
     print("inside the model.query_doc_info")
-    outputs, links = {}, []
+    outputs, links, accession_found_in_text = {}, [], False
     if model_ai:
         if model_ai == "gemini-1.5-flash-latest":
             genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
@@ -1245,6 +1245,10 @@ async def query_document_info(niche_cases, query_word, alternative_query_word, s
         f"\nText Snippets:\n{context_for_llm}")
 
     print("this is prompt: ", prompt_for_llm)
+    # check if accession in text or not
+    if alternative_query_word_cleaned.lower() in prompt_for_llm.lower():
+        accession_found_in_text = True
+
     if model_ai:
         print("back up to ", model_ai)
         #llm_response_text, model_instance = call_llm_api(prompt_for_llm, model=model_ai)
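The added check is a plain case-insensitive substring test on the prompt, and the same test is repeated for the general-knowledge prompt in the next hunk. A minimal standalone sketch of the idea, using a hypothetical helper name that does not exist in model.py:

def term_in_text(term: str, text: str) -> bool:
    # Case-insensitive substring test, mirroring the accession check
    # added in this commit.
    return term.lower() in text.lower()

# e.g. term_in_text("NC_000913", prompt_for_llm) is True whenever the
# accession appears anywhere in the prompt, regardless of case.

Note that a bare substring test also matches the accession inside a longer token; a word-boundary regex would be stricter if that ever matters here.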
@@ -1378,7 +1382,8 @@ async def query_document_info(niche_cases, query_word, alternative_query_word, s
         f"2. The **second line onward** contains the explanations based on the non-unknown {again_output_format} answer.\n"
         f"\nText Snippets:\n{context_for_llm}")
     print("len of prompt:", len(general_knowledge_prompt))
-
+    if alternative_query_word_cleaned.lower() in general_knowledge_prompt.lower():
+        accession_found_in_text = True
     if general_knowledge_prompt:
         if model_ai:
             print("back up to ", model_ai)
@@ -1423,5 +1428,5 @@ async def query_document_info(niche_cases, query_word, alternative_query_word, s
 
     print("all done and method used: ", outputs, method_used)
     print("total cost: ", total_query_cost)
-    return outputs, method_used, total_query_cost, links
+    return outputs, method_used, total_query_cost, links, accession_found_in_text
 
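Because the return tuple grows from four to five items, every caller of query_document_info must now unpack the new accession_found_in_text flag as well. A hypothetical call site against a stub (the real argument list is truncated in the hunk headers, so it is not reproduced here):

import asyncio

async def query_document_info(*args, **kwargs):
    # Stub with the new five-item return shape; stands in for the real
    # model.query_document_info in this sketch.
    return {}, "rag", 0.0, [], False

async def main():
    # Callers must now unpack five values, not four.
    outputs, method_used, total_query_cost, links, accession_found_in_text = (
        await query_document_info()
    )
    if not accession_found_in_text:
        print("accession was not found in the prompt text")

asyncio.run(main())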