The Mention API returns large datasets in pages to optimize performance and reduce response sizes. This guide covers all pagination strategies, from basic cursor-based pagination to automatic iteration with iter_mentions().
Paginated responses include metadata for navigating pages:
from mention import MentionClient

client = MentionClient(access_token="token")

# Fetch a single page of up to 100 mentions.
response = client.get_mentions("account-id", "alert-id", limit=100)

# MentionsResponse structure
print(response.mentions)    # List of Mention objects (up to 100)
print(response.has_more)    # Boolean: more pages available?
print(response.links)       # Links object with pagination URLs
print(response.links.more)  # Cursor for next page
Response attributes:
mentions (list[Mention]): Current page of results
has_more (bool): Whether more results exist
links (Links): Navigation links for pagination
links.more (str | None): Cursor token for next page
The iter_mentions() method handles pagination automatically, yielding mentions one at a time:
Sync
Async
from mention import MentionClient

client = MentionClient(access_token="token")

# Automatically iterate through all mentions.
for mention in client.iter_mentions(
    "account-id",
    "alert-id",
    limit=100,  # Page size (fetches 100 per request)
):
    print(f"{mention.title} - {mention.published_at}")
    # Process mention...

# The method handles:
# - Fetching pages automatically
# - Following pagination cursors
# - Stopping when no more results
import asyncio

from mention import AsyncMentionClient


async def process_all_mentions():
    """Stream every mention for an alert using async iteration."""
    client = AsyncMentionClient(access_token="token")

    # Async iteration through all mentions.
    async for mention in client.iter_mentions(
        "account-id",
        "alert-id",
        limit=100,  # Page size
    ):
        print(f"{mention.title} - {mention.published_at}")
        # Process mention...

    await client.close()


asyncio.run(process_all_mentions())
iter_mentions() is available for both MentionClient (sync) and AsyncMentionClient (async). Note that the async version uses async for instead of regular for.
For fine-grained control, manually manage pagination with cursors:
Sync
Async
from mention import MentionClient

client = MentionClient(access_token="token")

cursor = None
all_mentions = []

while True:
    # Fetch one page; cursor is None on the first request.
    response = client.get_mentions(
        "account-id",
        "alert-id",
        limit=100,
        cursor=cursor,
    )

    # Process current page.
    all_mentions.extend(response.mentions)
    print(f"Fetched {len(response.mentions)} mentions")

    # Stop when the API reports no further pages.
    if not response.has_more or not response.links or not response.links.more:
        break

    # Advance the cursor to the next page.
    cursor = response.links.more

print(f"Total: {len(all_mentions)} mentions")
import asyncio

from mention import AsyncMentionClient


async def fetch_all_mentions():
    """Collect every mention for an alert by following pagination cursors."""
    client = AsyncMentionClient(access_token="token")
    cursor = None
    all_mentions = []

    while True:
        # Fetch one page; cursor is None on the first request.
        response = await client.get_mentions(
            "account-id",
            "alert-id",
            limit=100,
            cursor=cursor,
        )

        # Process current page.
        all_mentions.extend(response.mentions)
        print(f"Fetched {len(response.mentions)} mentions")

        # Stop when the API reports no further pages.
        if not response.has_more or not response.links or not response.links.more:
            break

        # Advance the cursor to the next page.
        cursor = response.links.more

    print(f"Total: {len(all_mentions)} mentions")
    await client.close()
    return all_mentions


asyncio.run(fetch_all_mentions())
from mention import MentionClient
from tqdm import tqdm


def process_mentions_with_progress(account_id, alert_id):
    """Process all mentions for an alert while showing a progress bar.

    The API does not expose a total result count, so the bar is left
    unbounded: tqdm renders a running counter instead of a percentage.
    (The original example issued an extra ``get_mentions(limit=1)``
    request whose result was never used; that wasted call is removed.)
    """
    client = MentionClient(access_token="token")

    processed = 0
    unread_count = 0

    # No `total=` argument: tqdm shows count + rate without a percentage.
    with tqdm(desc="Processing mentions", unit=" mentions") as pbar:
        for mention in client.iter_mentions(account_id, alert_id, limit=100):
            # Process mention
            if not mention.read:
                unread_count += 1
            processed += 1
            pbar.update(1)

    print(f"\nProcessed {processed} mentions ({unread_count} unread)")


process_mentions_with_progress("account-id", "alert-id")
import asyncio

from mention import AsyncMentionClient


async def process_alert_mentions(client, account_id, alert):
    """Process mentions for a single alert."""
    count = 0
    async for mention in client.iter_mentions(account_id, alert.id, limit=100):
        # Process mention
        count += 1
    return alert.name, count


async def process_all_alerts_parallel(account_id):
    """Process mentions for all alerts in parallel."""
    async with AsyncMentionClient(access_token="token") as client:
        # Fetch all alerts.
        alerts_response = await client.get_alerts(account_id)

        # One coroutine per alert, run concurrently.
        tasks = [
            process_alert_mentions(client, account_id, alert)
            for alert in alerts_response.alerts
        ]
        results = await asyncio.gather(*tasks)

        # Display results.
        for alert_name, count in results:
            print(f"{alert_name}: {count} mentions")


# Run
asyncio.run(process_all_alerts_parallel("account-id"))
Prefer automatic iteration with iter_mentions() unless you need fine-grained control over individual pages:
# ✅ Good: Simple and clean
for mention in client.iter_mentions(account_id, alert_id):
    process(mention)

# ❌ Unnecessary: Manual pagination for simple case
cursor = None
while True:
    response = client.get_mentions(account_id, alert_id, cursor=cursor)
    for mention in response.mentions:
        process(mention)
    if not response.has_more:
        break
    cursor = response.links.more
Use Filters to Reduce Data Transfer
Apply filters to fetch only what you need:
# ✅ Good: Filter server-side
for mention in client.iter_mentions(
    account_id,
    alert_id,
    read=False,       # Only unread
    tone="negative",  # Only negative
):
    process(mention)

# ❌ Bad: Fetch everything then filter client-side
for mention in client.iter_mentions(account_id, alert_id):
    if not mention.read and mention.tone == "negative":
        process(mention)
Optimize Page Size for Your Use Case
Balance request frequency vs response size:
# For real-time UI: smaller pages
for mention in client.iter_mentions(account_id, alert_id, limit=20):
    display(mention)  # Update UI frequently

# For batch processing: larger pages
for mention in client.iter_mentions(account_id, alert_id, limit=1000):
    process_batch(mention)  # Minimize requests
Handle Interruptions in Long-Running Jobs
Save progress for resumable processing:
# Implement checkpointing for long-running jobs.
# NOTE(review): `checkpoint` is loaded but not consumed here — presumably a
# resume offset to skip already-processed mentions; confirm against the
# checkpointing implementation.
checkpoint = load_checkpoint()

# Pre-bind `i` so the except handler cannot hit a NameError when the
# interrupt arrives before the first mention is yielded.
i = -1
try:
    for i, mention in enumerate(client.iter_mentions(...)):
        process(mention)
        if i % 100 == 0:
            save_checkpoint(i)
except KeyboardInterrupt:
    if i >= 0:  # only save if at least one mention was processed
        save_checkpoint(i)
    print("Progress saved, can resume later")
# ✅ Good: Memory-efficient streaming
for mention in client.iter_mentions(account_id, alert_id, limit=1000):
    process(mention)  # Process one at a time
    # Previous mentions are garbage collected

# ❌ Bad: Loads everything into memory
all_mentions = []
for mention in client.iter_mentions(account_id, alert_id):
    all_mentions.append(mention)  # Memory grows unbounded