diff --git a/main.py b/main.py
index a0d0c59..f6198e7 100644
--- a/main.py
+++ b/main.py
@@ -42,8 +42,7 @@
         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
     }
-    for link in links:
-        print("downloading text from: " + link)
+    for link in links:
         try:
             response = requests.get(link, headers=headers, timeout=timeout)
             if response.status_code == 200:
@@ -59,13 +58,13 @@
     return extracted_texts
 
-def summarize_individual_texts(texts_and_urls, query, ollama_url="http://localhost:11434/api/generate"):
+def summarize_individual_texts(texts_and_urls, query, model, ollama_url="http://localhost:11434/api/generate"):
     summaries = []
     for url, text in texts_and_urls:
         prompt = f"Extract the relevant information from the following text with regards to the original \
 query: '{query}'\n\n{text}\n"
         payload = {
-            "model": "command-r",
+            "model": model,
             "prompt": prompt,
             "stream": False,
             "max_tokens": 1000
@@ -84,7 +83,7 @@
     return summaries
 
-def summarize_with_ollama(texts_and_urls, query, ollama_url="http://localhost:11434/api/generate"):
+def summarize_with_ollama(texts_and_urls, query, model, ollama_url="http://localhost:11434/api/generate"):
     # Prepare the context and prompt
     context = "\n".join([f"URL: {url}\nText: {text}" for url, text in texts_and_urls])
     prompt = f"Summarize the following search results with regards to the original query: '{query}' \
@@ -93,7 +92,7 @@
     # Create the payload for the POST request
     payload = {
-        "model": "command-r",
+        "model": model,
         "prompt": prompt,
         "stream": False,
         "max_tokens": 1500
@@ -114,14 +113,14 @@
     return None
 
-def optimize_search_query(query, ollama_url="http://localhost:11434/api/generate"):
+def optimize_search_query(query, model, ollama_url="http://localhost:11434/api/generate"):
     # Prepare the prompt for optimizing the search query
     prompt = f"Optimize the following natural language query to improve its effectiveness in a web search.\
 Make it very concise. query: '{query}'"
     # Create the payload for the POST request
     payload = {
-        "model": "command-r",
+        "model": model,
         "prompt": prompt,
         "stream": False,
         "max_tokens": 50
@@ -158,8 +157,10 @@
     args = parser.parse_args()
     original_query = args.query
 
+    model = "qc"
+
     # Optimize the search query
-    optimized_query = optimize_search_query(original_query)
+    optimized_query = optimize_search_query(original_query, model)
     print(f"Original Query: {original_query}")
     print(f"Optimized Query: {optimized_query}")
@@ -173,9 +174,8 @@
     texts_and_urls = extract_text_from_links(links)
     print("Summarizing individual search results")
-    intermediate_summaries = summarize_individual_texts(texts_and_urls, original_query)
-
-    final_summary = summarize_with_ollama(intermediate_summaries, original_query)
+    intermediate_summaries = summarize_individual_texts(texts_and_urls, original_query, model)
+    final_summary = summarize_with_ollama(intermediate_summaries, original_query, model)
     if final_summary:
         print("\nFinal Summary of search results:\n")
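
A minimal sketch of the pattern this patch introduces: the Ollama model name is passed as a parameter rather than hard-coded as "command-r" in each helper. The helper name ollama_generate and the example model tag below are illustrative assumptions and do not appear in main.py; only the payload shape mirrors the patched functions.

# Sketch (assumption: not part of main.py) of threading a caller-supplied model
# name through to Ollama's /api/generate endpoint, as the patched helpers now do.
import requests

def ollama_generate(prompt, model, ollama_url="http://localhost:11434/api/generate"):
    # "model" was previously hard-coded; it is now supplied by the caller.
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
    }
    response = requests.post(ollama_url, json=payload, timeout=120)
    response.raise_for_status()
    # With "stream": False, Ollama returns a single JSON object whose
    # "response" field holds the generated text.
    return response.json().get("response", "")

if __name__ == "__main__":
    # Hypothetical usage; substitute any model tag you have pulled locally.
    print(ollama_generate("Say hello in one word.", model="llama3"))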