This ability is an AI-generated trivia quiz that creates multiple-choice questions, reads them aloud, evaluates your spoken answers with fuzzy matching, and tracks your score — perfect for entertainment or learning.
The ability uses structured prompts to generate valid JSON:
# Prompt template instructing the LLM to return a bare JSON list of questions.
GENERATE_QUESTIONS_PROMPT = (
    "Generate {num} multiple-choice questions on {cat}. "
    "Each question should have four choices labeled A, B, C, and D, and specify the correct answer. "
    "Return ONLY a JSON list where each element has 'question', 'choices' (list of strings), "
    "and 'correct_answer'. No other text."
)


async def generate_questions(self, max_attempts: int = 3):
    """Use the LLM to generate quiz questions as JSON.

    Retries up to ``max_attempts`` times when the model returns
    unparseable JSON. The original implementation retried by calling
    itself recursively with no limit, which could recurse forever
    (and eventually overflow the stack) if the model never produced
    valid JSON; a bounded loop keeps the retry behavior without that
    failure mode.

    On total failure ``self.quiz_questions`` is left as an empty list
    so the question loop in ``run_quiz`` completes gracefully.
    """
    for attempt in range(1, max_attempts + 1):
        prompt = GENERATE_QUESTIONS_PROMPT.format(
            num=NUM_QUESTIONS, cat=QUIZ_CATEGORY
        )
        raw = self.capability_worker.text_to_text_response(prompt)
        # Models often wrap JSON in Markdown code fences; strip them first.
        clean = raw.replace("```json", "").replace("```", "").strip()
        try:
            questions = json.loads(clean)
        except json.JSONDecodeError:
            self.worker.editor_logging_handler.error("[Quiz] Failed to parse questions JSON")
            if attempt < max_attempts:
                await self.capability_worker.speak("Sorry, I had trouble generating questions. Let me try again.")
            continue
        # Guard against valid JSON of the wrong shape (e.g. a dict or string).
        if isinstance(questions, list):
            self.quiz_questions = questions
            return
        self.worker.editor_logging_handler.error("[Quiz] Questions JSON was not a list")
    # All attempts failed: an empty list lets run_quiz finish without crashing.
    self.quiz_questions = []
Example Generated JSON
[ { "question": "What is the largest planet in our solar system?", "choices": ["A. Mars", "B. Jupiter", "C. Saturn", "D. Earth"], "correct_answer": "Jupiter" }, { "question": "Who painted the Mona Lisa?", "choices": ["A. Michelangelo", "B. Leonardo da Vinci", "C. Raphael", "D. Donatello"], "correct_answer": "Leonardo da Vinci" }, { "question": "What is the capital of Japan?", "choices": ["A. Seoul", "B. Beijing", "C. Tokyo", "D. Bangkok"], "correct_answer": "Tokyo" }]
The ability uses LLM reasoning to accept synonyms and variations:
# Prompt template asking the LLM to judge a spoken answer leniently.
ANSWER_CHECK_PROMPT = (
    "Question: '{question}'\n"
    "Correct answer: '{correct}'\n"
    "User's response: '{user_answer}'\n"
    "Is the user's response correct? Consider synonyms and variations. "
    "Respond with only 'yes' or 'no'."
)


async def ask_question(self, question_data: dict) -> bool | None:
    """Speak one question and judge the user's spoken reply.

    Returns True for a correct answer, False for an incorrect one,
    and None when the user asked to leave the quiz.
    """
    # Read the question followed by its four lettered choices.
    spoken = " ".join([question_data["question"], *question_data["choices"]])
    await self.capability_worker.speak(spoken)
    reply = await self.capability_worker.user_response()

    # Bail out early if the reply contains any exit keyword.
    lowered = reply.lower()
    if any(word in lowered for word in EXIT_WORDS):
        await self.capability_worker.speak("Exiting the quiz. See you next time!")
        return None

    # Delegate grading to the LLM so synonyms and variations are accepted.
    verdict = self.capability_worker.text_to_text_response(
        ANSWER_CHECK_PROMPT.format(
            question=question_data["question"],
            correct=question_data["correct_answer"],
            user_answer=reply,
        )
    )
    return "yes" in verdict.lower()
async def run_quiz(self):
    """Run the full quiz: intro, question loop, and final score.

    The final score is only announced when every question was asked;
    an early exit skips it. Normal flow is always resumed at the end.
    """
    score = 0
    await self.capability_worker.speak(
        QUIZ_INTRO.format(num=NUM_QUESTIONS, cat=QUIZ_CATEGORY)
    )

    # Generate questions
    await self.generate_questions()

    # Ask each question; track whether the user sat through all of them.
    completed = True
    for item in self.quiz_questions[:NUM_QUESTIONS]:
        outcome = await self.ask_question(item)
        if outcome is None:
            # User exited
            completed = False
            break
        if outcome:
            await self.capability_worker.speak(FEEDBACK_CORRECT)
            score += 1
        else:
            await self.capability_worker.speak(FEEDBACK_INCORRECT)

    if completed:
        # Only show final score if we completed all questions
        await self.capability_worker.speak(
            FINAL_SCORE.format(correct=score, total=NUM_QUESTIONS)
        )
    self.capability_worker.resume_normal_flow()
User: "Start a quiz"AI: "Welcome to the Quiz! I'll ask you 3 questions on General Knowledge."AI: "What is the largest planet in our solar system? A. Mars B. Jupiter C. Saturn D. Earth"User: "Jupiter"AI: "That's correct!"AI: "Who painted the Mona Lisa? A. Michelangelo B. Leonardo da Vinci C. Raphael D. Donatello"User: "Da Vinci"AI: "That's correct!"AI: "What is the capital of Japan? A. Seoul B. Beijing C. Tokyo D. Bangkok"User: "Tokyo"AI: "That's correct!"AI: "You got 3 out of 3 correct! Thanks for playing!"
The LLM evaluator accepts variations like “B”, “option B”, “Jupiter”, “the big one” for flexible voice input!