# Generate the narration script from the request payload.  A non-empty
# customPrompt replaces the default prompt inside generate_script entirely.
script = generate_script(
    data["videoSubject"],
    paragraph_number,
    ai_model,
    voice,
    data["customPrompt"],
)
# An empty/None script means the model produced nothing usable; fail fast
# rather than running the rest of the pipeline with no narration.
if not script:
    raise RuntimeError(
        "Could not generate a script. Try a different model or prompt."
    )
The generate_script function uses Ollama’s chat API:
Backend/gpt.py
def generate_script(
    video_subject: str,
    paragraph_number: int,
    ai_model: str,
    voice: str,
    customPrompt: str,
) -> Optional[str]:
    """Generate a narration script for a video via the configured LLM.

    Args:
        video_subject: Topic the script should cover.
        paragraph_number: Number of paragraphs to keep from the response.
        ai_model: Model identifier forwarded to generate_response.
        voice: TTS voice name; also used as the language hint in the prompt.
        customPrompt: If non-empty, replaces the default prompt entirely.

    Returns:
        The cleaned script text (may be empty if the model returned
        nothing usable; callers are expected to check for falsiness).
    """
    if customPrompt:
        prompt = customPrompt
    else:
        prompt = f"""
        Generate a script for a video, depending on the subject of the video.

        The script is to be returned as a string with the specified number of paragraphs.

        YOU MUST NOT INCLUDE ANY TYPE OF MARKDOWN OR FORMATTING IN THE SCRIPT. YOU MUST WRITE THE SCRIPT IN THE LANGUAGE SPECIFIED IN [LANGUAGE]. ONLY RETURN THE RAW CONTENT OF THE SCRIPT.

        Subject: {video_subject}
        Number of paragraphs: {paragraph_number}
        Language: {voice}
        """

    response = generate_response(prompt, ai_model)

    # Clean the response: strip markdown emphasis/heading characters the
    # model may emit despite the prompt instructions.
    response = response.replace("*", "").replace("#", "")
    # Remove stage directions such as "[intro music]" or "(pause)".
    # BUGFIX: use non-greedy quantifiers.  The previous greedy r"\[.*\]"
    # deleted everything between the FIRST "[" and the LAST "]" on a line,
    # wiping out legitimate script text whenever two bracketed (or
    # parenthesized) spans appeared on the same line.
    response = re.sub(r"\[.*?\]", "", response)
    response = re.sub(r"\(.*?\)", "", response)

    # Keep only the requested number of paragraphs (blank-line separated).
    paragraphs = response.split("\n\n")
    selected_paragraphs = paragraphs[:paragraph_number]
    final_script = "\n\n".join(selected_paragraphs)
    return final_script
Custom prompts override the default script generation prompt entirely.
def get_search_terms(
    video_subject: str, amount: int, script: str, ai_model: str
) -> List[str]:
    """Ask the LLM for stock-video search terms related to the script.

    Args:
        video_subject: Topic of the video.
        amount: Number of search terms to request.
        script: Full script text, passed to the model as context.
        ai_model: Model identifier forwarded to generate_response.

    Returns:
        The parsed list of search-term strings.

    Raises:
        json.JSONDecodeError: If no parseable JSON array can be found in
            the model's response.
    """
    prompt = f"""
    Generate {amount} search terms for stock videos, depending on the subject of a video.

    Subject: {video_subject}

    The search terms are to be returned as a JSON-Array of strings. Each search term should consist of 1-3 words.

    YOU MUST ONLY RETURN THE JSON-ARRAY OF STRINGS.

    For context, here is the full text: {script}
    """

    response = generate_response(prompt, ai_model)

    # ROBUSTNESS: models frequently wrap the array in markdown code fences
    # or add commentary despite the instructions, which makes a bare
    # json.loads crash.  Isolate the first JSON array in the response; a
    # clean raw array still parses exactly as before.
    match = re.search(r"\[.*\]", response, re.DOTALL)
    if match:
        response = match.group(0)

    search_terms = json.loads(response)
    return search_terms
# Collect candidate stock-video URLs: one unique video per search term.
video_urls = []
it = 15  # Max results per search
min_dur = 10  # Minimum duration in seconds
for search_term in search_terms:
    guard_cancelled()  # Check for cancellation
    found_urls = search_for_stock_videos(
        search_term, os.getenv("PEXELS_API_KEY"), it, min_dur
    )
    # Scan the results in order and keep the first URL we have not
    # already selected, then move on to the next search term.
    for url in found_urls:
        if url not in video_urls:
            video_urls.append(url)
            break  # Take first unique video
if not video_urls:
    raise RuntimeError("No videos found to download.")
Download videos:
Backend/pipeline.py
# Download each selected stock video to local storage.  Individual
# failures are reported and skipped so one bad URL does not abort the
# whole batch.
video_paths = []
emit(f"[+] Downloading {len(video_urls)} videos...", "info")
for video_url in video_urls:
    guard_cancelled()  # honor cancellation between downloads
    try:
        saved_video_path = save_video(video_url)
        video_paths.append(saved_video_path)
    except Exception:
        # Best-effort: log the failure and continue with the rest.
        emit(f"[-] Could not download video: {video_url}", "error")
def guard_cancelled() -> None:
    """Raise PipelineCancelled when the job's cancellation flag is set.

    `is_cancelled` is a module-level callable (or falsy when no
    cancellation hook is installed); when unset the guard is a no-op.
    """
    if is_cancelled and is_cancelled():
        raise PipelineCancelled("Video generation was cancelled.")


# Called before expensive operations:
guard_cancelled()  # Before script generation
guard_cancelled()  # Before video downloads
guard_cancelled()  # Before TTS
guard_cancelled()  # Before final render
When cancelled, the worker catches the exception:
Backend/worker.py
# Map the pipeline's outcome onto the job's terminal state:
# completed on success, cancelled on a user-initiated PipelineCancelled,
# failed on any other exception.
try:
    result_path = run_generation_pipeline(...)
    mark_completed(session, job_id, result_path)
except PipelineCancelled as err:
    # Cancellation is an expected, user-driven outcome — not a failure.
    mark_cancelled(session, job_id, str(err))
except Exception as err:
    mark_failed(session, job_id, str(err))