Gemini enables you to build sophisticated customer support solutions that understand multimodal inputs (text, images, audio), integrate with your business systems, and provide personalized, context-aware responses. This guide demonstrates practical implementations using real-world examples.
# System prompt framing the model as a furniture-sales specialist.
# FIX: restored the missing space between sentences ("recommendations.All").
system_instruction = """You are an expert sales assistant specializing in furniture recommendations. All questions should be answered comprehensively with details."""
# Customer uploads an image and asks a question about similar products.
customer_query = "Do you have chairs similar to the one in this picture, but in red?"
customer_image_url = "https://storage.googleapis.com/samples/armchair.png"

# Build the product-catalog parts (text label + image) from Cloud Storage URIs.
product_catalog_parts = []
for product in product_catalog:
    product_catalog_parts.append(f"Chair (id={product['id']}):")
    product_catalog_parts.append(
        Part.from_uri(file_uri=product["image_url"], mime_type="image/png")
    )

# Generate a response grounded in the customer's image and the catalog.
# BUG FIX: the catalog parts must be spliced into `contents` individually;
# the original passed the whole list as one nested element, which is not a
# valid content part for generate_content.
response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=[
        customer_query,
        Part.from_uri(file_uri=customer_image_url, mime_type="image/png"),
        "catalog:",
        *product_catalog_parts,
    ],
    config=GenerateContentConfig(
        system_instruction=system_instruction,
    ),
)
print(response.text)
# Interior-designer persona: compare a product photo against the customer's
# room photo and answer a fit question.
# FIX: restored the missing space between sentences ("designer.Your").
system_instruction = """You are an interior designer. Your mission is to help customers create living spaces that balance functionality and beauty through personalized service."""

customer_query = "Would this chair fit in my room?"
chair_url = "gs://samples/red-chair.png"
room_url = "gs://samples/living-room.png"

# Both images are labeled inline so the model can tell them apart.
response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=[
        "Chair:",
        Part.from_uri(file_uri=chair_url, mime_type="image/png"),
        "Living room:",
        Part.from_uri(file_uri=room_url, mime_type="image/png"),
        customer_query,
    ],
    config=GenerateContentConfig(
        system_instruction=system_instruction,
    ),
)
print(response.text)
Create images showing products in customer environments:
# Ask the image-generation preview model to composite the chair into the
# customer's room photo. TEXT+IMAGE modalities let the model return both
# commentary and generated pictures.
response = client.models.generate_content(
    model="gemini-2.0-flash-preview-image-generation",
    contents=[
        "Chair:",
        Part.from_uri(file_uri=chair_url, mime_type="image/png"),
        "Living room:",
        Part.from_uri(file_uri=room_url, mime_type="image/png"),
        "Create an image with the chair integrated in the living room",
    ],
    config=GenerateContentConfig(response_modalities=["TEXT", "IMAGE"]),
)

# Render every image part the model returned.
from IPython.display import Image, display

for candidate_part in response.candidates[0].content.parts:
    if candidate_part.inline_data:
        display(Image(data=candidate_part.inline_data.data))
# Start a deterministic (temperature=0) chat session with the retail
# store-lookup tool attached so the model can propose function calls.
chat = client.chats.create(
    model="gemini-2.0-flash",
    config=GenerateContentConfig(
        temperature=0,
        tools=[retail_tool],
    ),
)

customer_query = "Is this chair available at a store near me? I'm at Google Cloud Next 2025."
response = chat.send_message(customer_query)

# Inspect the function calls the model generated.
# ROBUSTNESS FIX: `response.function_calls` is None when the model answers
# directly instead of calling a tool; iterating None raised TypeError.
for function_call in response.function_calls or []:
    print(f"Function: {function_call.name}")
    print(f"Arguments: {function_call.args}")
import numpy as npfrom IPython.display import Audio, Markdown, displayfrom google.genai.types import LiveConnectConfigconfig = LiveConnectConfig( response_modalities=["AUDIO"], tools=[Tool(google_search=GoogleSearch())],)async def main(): async with client.aio.live.connect( model="gemini-2.0-flash-live-preview", config=config ) as session: async def send(): text_input = input("You: ") if text_input.lower() in ("q", "quit", "exit"): return False await session.send_client_content( turns=Content(role="user", parts=[Part(text=text_input)]) ) return True async def receive(): audio_data = [] async for message in session.receive(): if ( message.server_content.model_turn and message.server_content.model_turn.parts ): for part in message.server_content.model_turn.parts: if part.inline_data: audio_data.append( np.frombuffer(part.inline_data.data, dtype=np.int16) ) if message.server_content.turn_complete: display(Markdown("**Assistant:**")) display(Audio(np.concatenate(audio_data), rate=24000, autoplay=True)) break while True: if not await send(): break await receive()# Run the conversationawait main()