Gemini excels at understanding and generating code across multiple programming languages. By combining code generation with Retrieval Augmented Generation (RAG), you can build intelligent coding assistants that understand your specific codebase and coding standards.
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate

# System instructions for the code-generation RAG chain: answer with
# concise, runnable Python grounded in the retrieved context.
rag_prompt = """You are a proficient Python developer. Generate syntactically correct and concise code.
Rules:
1. Use the context to understand APIs and apply them correctly
2. Do not add license information to the output
3. Do not include Colab-specific code
4. Ensure all requirements in the question are met
Question:
{question}
Context:
{context}
Code:
"""

prompt_template = PromptTemplate(
    template=rag_prompt,
    input_variables=["context", "question"],
)

# Wire the LLM, prompt, and retriever into a question-answering chain.
# NOTE(review): `code_llm` and `retriever` are assumed to be created in
# an earlier cell — confirm before running.
qa_chain = RetrievalQA.from_llm(
    llm=code_llm,
    prompt=prompt_template,
    retriever=retriever,
    return_source_documents=True,
)
# Ask the RAG chain to produce a Gemini prediction helper, then print
# only the generated code (source documents are ignored here).
user_question = """Create a Python function that takes a prompt and generates a prediction using LangChain with the Vertex AI Gemini model."""

rag_response = qa_chain.invoke(input={"query": user_question})
print(rag_response["result"])
Output:
def predict_with_gemini(prompt: str) -> str:
    """Send *prompt* to the Vertex AI Gemini model and return its reply.

    Args:
        prompt: Input text prompt.

    Returns:
        The model's text response.
    """
    from langchain_google_vertexai import VertexAI
    import vertexai

    # Initialize the Vertex AI SDK for this project/region.
    vertexai.init(project="your-project", location="us-central1")

    # Low temperature keeps generations close to deterministic.
    llm = VertexAI(
        model_name="gemini-2.0-flash",
        max_output_tokens=2048,
        temperature=0.1,
    )
    return llm.invoke(input=prompt)


# Usage
result = predict_with_gemini("Explain quantum computing")
print(result)
# Second query: have the chain generate an embedding helper function.
user_question = """Create a Python function that takes text input and returns embeddings using LangChain with Vertex AI text-embedding-005 model."""

rag_response = qa_chain.invoke(input={"query": user_question})
print(rag_response["result"])
Output:
def get_embeddings(text: str | list[str]) -> list[list[float]]:
    """Embed one string or a batch of strings with Vertex AI.

    Args:
        text: A single string or a list of strings to embed.

    Returns:
        One embedding vector (list of floats) per input string.
    """
    from langchain_google_vertexai import VertexAIEmbeddings
    import vertexai

    vertexai.init(project="your-project", location="us-central1")

    embedder = VertexAIEmbeddings(
        model_name="text-embedding-005",
    )

    # Normalize a bare string into a one-element batch.
    batch = [text] if isinstance(text, str) else text
    return embedder.embed_documents(batch)


# Usage
embeddings = get_embeddings(["Hello world", "AI is amazing"])
print(f"Generated {len(embeddings)} embeddings")
from google import genai
from google.genai.types import GenerateContentConfig

# NOTE(review): PROJECT_ID and LOCATION are assumed to be set earlier
# in the notebook — confirm before running.
client = genai.Client(vertexai=True, project=PROJECT_ID, location=LOCATION)

# Deliberately dated code for the model to modernize.
legacy_code = """def process_data(data):
    result = []
    for i in range(len(data)):
        if data[i] > 0:
            result.append(data[i] * 2)
    return result"""

refactoring_prompt = f"""Refactor this code to use modern Python best practices:
- Use list comprehensions
- Add type hints
- Add docstrings
- Use descriptive variable names
Code:
{legacy_code}
"""

# Low temperature for a focused, reproducible refactoring.
response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=refactoring_prompt,
    config=GenerateContentConfig(temperature=0.1),
)
print(response.text)
Output:
def process_positive_numbers(data: list[int | float]) -> list[int | float]: """Double all positive numbers in the input list. Args: data: List of numeric values Returns: List containing doubled positive values """ return [value * 2 for value in data if value > 0]
# A function with no documentation, for the model to annotate.
undocumented_code = """def calculate_metrics(transactions, start_date, end_date):
    filtered = [t for t in transactions if start_date <= t['date'] <= end_date]
    total = sum(t['amount'] for t in filtered)
    avg = total / len(filtered) if filtered else 0
    return {'total': total, 'average': avg, 'count': len(filtered)}"""

doc_prompt = f"""Add comprehensive docstrings to this function following Google style:
{undocumented_code}"""

# Default generation settings are fine for a documentation task.
response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=doc_prompt,
)
print(response.text)