Understand Twitter API rate limits and how to work within them
Twitter API enforces rate limits to prevent abuse and ensure fair usage. Understanding these limits is crucial for building reliable applications with Twikit.
When you exceed a rate limit, Twikit raises a TooManyRequests exception:
from twikit import Client
from twikit.errors import TooManyRequests
import asyncio
import time

client = Client('en-US')


async def search_with_retry(query):
    """Search the latest tweets for *query*, retrying once after a rate limit.

    Args:
        query: Search query string passed to ``Client.search_tweet``.

    Returns:
        The result of ``client.search_tweet(query, 'Latest')``.

    Raises:
        TooManyRequests: If the single retry is also rate limited.
    """
    try:
        return await client.search_tweet(query, 'Latest')
    except TooManyRequests as e:
        # Prefer the server-provided reset timestamp when available.
        reset_time = e.rate_limit_reset
        if reset_time:
            # Clamp to zero: the window may already have reset by the time we
            # compute the delta; the original code could report (and sleep) a
            # negative wait time.
            wait_time = max(reset_time - int(time.time()), 0)
            print(f'Rate limited. Waiting {wait_time} seconds...')
        else:
            # No reset info: fall back to Twitter's standard 15-minute window.
            wait_time = 900
            print('Rate limited. Waiting 15 minutes...')
        await asyncio.sleep(wait_time)
        # Single retry after waiting; a second TooManyRequests propagates
        # to the caller rather than looping forever.
        return await client.search_tweet(query, 'Latest')
The TooManyRequests exception includes a rate_limit_reset attribute with a Unix timestamp indicating when the limit resets. Use this to calculate the exact wait time.
# Good: Request smaller batchestweets = await client.search_tweet('python', 'Latest', count=20)# Process the first batchfor tweet in tweets: process_tweet(tweet)# Get more only if neededif need_more: more_tweets = await tweets.next()
from datetime import datetime, timedelta


class CachedClient:
    """Wrap a Twikit client and cache user lookups in memory.

    Repeated lookups of the same user within the cache window are served
    from memory, avoiding unnecessary spend of rate-limit budget.
    """

    def __init__(self, client):
        # Underlying twikit client used on cache misses.
        self.client = client
        # Maps screen_name -> (user, datetime when fetched).
        self.cache = {}

    async def get_user_cached(self, screen_name, cache_duration=300):
        """Return the user for *screen_name*, using a time-bounded cache.

        Args:
            screen_name: Twitter handle to look up.
            cache_duration: Seconds a cached entry stays fresh
                (default 300, i.e. five minutes).

        Returns:
            The user object from the underlying client, possibly cached.
        """
        now = datetime.now()
        if screen_name in self.cache:
            user, cached_at = self.cache[screen_name]
            if now - cached_at < timedelta(seconds=cache_duration):
                return user
        # Cache miss or stale entry: fetch from the API and refresh the cache.
        user = await self.client.get_user_by_screen_name(screen_name)
        self.cache[screen_name] = (user, now)
        return user
# Bad: Multiple separate requestsfor user_id in user_ids: user = await client.get_user_by_id(user_id) await asyncio.sleep(1) # Still wasteful# Good: Use pagination features# Many Twikit methods return Result objects that efficiently handle batchingfollowers = await user.get_followers(count=100) # Get 100 at oncefor follower in followers: process_follower(follower)
Some methods return an AsyncGenerator that streams results as they are fetched:
# This method yields tweets as they're fetchedasync for tweet in client.get_user_tweets_by_id(user_id, 'Tweets'): print(tweet.text) # Each iteration may trigger an API request when fetching the next batch # Be mindful of rate limits (50 requests/15min for UserTweets)