The current system prompt is located in app.py:59-66:
# Current system prompt (Portuguese), app.py:59-66.
# NOTE: the extracted text had words fused together ("perguntas.Se",
# "visualizaçõeselaboradas"); spacing/line breaks restored here.
# The {context} placeholder is filled with the retrieved document chunks.
system_prompt = '''Use o contexto para responder as perguntas.
Se não encontrar uma resposta no contexto,
explique que não há informações disponíveis.
Responda em formato de markdown e com visualizações
elaboradas e interativas.
Contexto: {context}'''
To customize the system prompt for different use cases:
# app.py - line 59
# English variant of the system prompt; extraction collapsed it onto one
# line with words fused together — spacing/line breaks restored here.
# The {context} placeholder is required by the retrieval chain.
system_prompt = '''Use the provided context to answer questions.
If you cannot find an answer in the context,
explain that the information is not available.
Respond in markdown format with detailed and interactive visualizations.
Context: {context}'''
The `{context}` placeholder is required — it is where LangChain injects the retrieved document chunks.
To set a default model instead of the first option:
# app.py - line 111
# Pre-select a default model instead of falling back to the first option.
selected_model = st.selectbox(
    label='Informe o llm que deseja:',
    options=model_options,
    index=4,  # Sets 'gpt-4o' as default (0-indexed) — confirm against model_options
)
Limit conversation history to prevent context overflow:
# Add after line 139 in app.py
# Record the user's turn, then cap history to avoid context-window overflow.
st.session_state.messages.append({'role': 'user', 'content': question})

# Keep only last 10 messages (5 exchanges)
if len(st.session_state.messages) > 10:
    st.session_state.messages = st.session_state.messages[-10:]
# app.py - line 95 (enhance the sidebar)
# Sidebar layout: upload section, model picker, and a small stats panel.
with st.sidebar:
    st.markdown("### 📁 Document Upload")
    uploaded_files = st.file_uploader(
        label='Upload your PDF files:',
        accept_multiple_files=True,
        type='pdf',
        help='Upload one or more PDF documents to chat with',
    )

    st.markdown("---")  # Divider
    st.markdown("### 🤖 Model Selection")
    selected_model = st.selectbox(
        label='Choose AI Model:',
        options=model_options,
        help='Select the OpenAI model for responses',
    )

    # Add statistics
    st.markdown("---")
    st.markdown("### 📊 Statistics")
    if vector_store:
        st.metric("Documents Loaded", "✓")
        # NOTE(review): in the collapsed source it is ambiguous whether this
        # metric sits inside the `if`; placed inside — confirm against app.py.
        st.metric("Messages", len(st.session_state.get('messages', [])))
# Replace the ask_question function (app.py:55)
def ask_question(model, query, vector_store):
    """Answer *query* with *model*, labelling each retrieved chunk as a source.

    Partial snippet: the final chain invocation is elided ("existing chain
    logic") and is expected to come from the surrounding app.py.
    """
    llm = ChatOpenAI(model=model)
    retriever = vector_store.as_retriever(
        search_kwargs={"k": 4}  # Return top 4 chunks
    )

    # Retrieve documents with metadata
    docs = retriever.get_relevant_documents(query)

    # Format context with sources: prefix every chunk with "[Source N]"
    # so the model can cite where each answer fragment came from.
    context_with_sources = "\n\n".join(
        f"[Source {i+1}] {doc.page_content}" for i, doc in enumerate(docs)
    )

    system_prompt = '''
    Use the provided context to answer questions.
    Include source numbers [Source N] in your response.
    Context: {context}
    '''
    # Continue with existing chain logic...
from langdetect import detect


def get_system_prompt(user_query):
    """Return a system prompt in the language detected from *user_query*.

    Falls back to the English default when detection fails or the detected
    language has no dedicated prompt.
    """
    lang = None
    try:
        lang = detect(user_query)
    # Fix: the original used a bare `except: pass`, which also swallows
    # KeyboardInterrupt/SystemExit. langdetect raises LangDetectException
    # on e.g. empty/undetectable input; catching Exception is sufficient.
    except Exception:
        pass
    if lang == 'en':
        return "Answer in English using the context..."
    elif lang == 'pt':
        return "Responda em português usando o contexto..."
    elif lang == 'es':
        return "Responde en español usando el contexto..."
    return "Use the context to answer..."  # Default
This requires adding `langdetect` to your `requirements.txt`.