curl --request POST \
--url https://api.example.com/research/batch \
--header 'Content-Type: application/json' \
--data '
{
"research_goal": "<string>",
"company_domains": [
{}
],
"search_depth": {},
"max_parallel_searches": 123,
"confidence_threshold": 123
}
'{
"research_id": "<string>",
"total_companies": 123,
"search_strategies_generated": 123,
"total_searches_executed": 123,
"processing_time_ms": 123,
"results": [
{
"domain": "<string>",
"confidence_score": 123,
"evidence_sources": 123,
"findings": {
"technologies": [
{}
],
"evidence": [
{
"url": "<string>",
"title": "<string>",
"snippet": "<string>",
"source_name": "<string>"
}
],
"signals_found": 123
}
}
],
"search_performance": {
"queries_per_second": 123,
"failed_requests": 123
},
"422 Unprocessable Entity": {},
"500 Internal Server Error": {}
}
Synchronous endpoint for complete company research results.
curl --request POST \
--url https://api.example.com/research/batch \
--header 'Content-Type: application/json' \
--data '
{
"research_goal": "<string>",
"company_domains": [
{}
],
"search_depth": {},
"max_parallel_searches": 123,
"confidence_threshold": 123
}
'{
"research_id": "<string>",
"total_companies": 123,
"search_strategies_generated": 123,
"total_searches_executed": 123,
"processing_time_ms": 123,
"results": [
{
"domain": "<string>",
"confidence_score": 123,
"evidence_sources": 123,
"findings": {
"technologies": [
{}
],
"evidence": [
{
"url": "<string>",
"title": "<string>",
"snippet": "<string>",
"source_name": "<string>"
}
],
"signals_found": 123
}
}
],
"search_performance": {
"queries_per_second": 123,
"failed_requests": 123
},
"422 Unprocessable Entity": {},
"500 Internal Server Error": {}
}

POST /research/batch

Example company_domains: ["stripe.com", "paypal.com", "square.com"]

search_depth options:
- quick - 3-5 strategies, 10-20 seconds per company
- standard - 8-12 strategies, 30-60 seconds per company
- comprehensive - 15-20+ strategies, 90-180 seconds per company
Use standard for most use cases.

max_parallel_searches:
- 5-10 - Conservative, prevents rate limits
- 20 - Balanced performance (recommended)
- 40-50 - Aggressive, requires high API quotas

confidence_threshold:
- 0.5-0.6 - Inclusive, more potential matches
- 0.7-0.8 - Balanced, good signal-to-noise
- 0.9+ - Strict, only high-confidence matches

Show CompanyResearchResult object
Show Findings object
curl -X POST http://localhost:8000/research/batch \
-H "Content-Type: application/json" \
-d '{
"research_goal": "Find fintech companies using AI for fraud detection",
"company_domains": ["stripe.com", "paypal.com"],
"search_depth": "standard",
"max_parallel_searches": 20,
"confidence_threshold": 0.7
}'
{
"research_id": "a7f3c8e9-4b2d-4a1e-8c5f-9d7e6f8a3b2c",
"total_companies": 2,
"search_strategies_generated": 12,
"total_searches_executed": 24,
"processing_time_ms": 34200,
"results": [
{
"domain": "stripe.com",
"confidence_score": 0.92,
"evidence_sources": 3,
"findings": {
"technologies": [
"tensorflow",
"python",
"kubernetes",
"radar",
"machine-learning"
],
"evidence": [
{
"url": "https://stripe.com/blog/radar-2.0",
"title": "Introducing Radar 2.0: Advanced fraud detection with machine learning",
"snippet": "Stripe Radar uses adaptive machine learning algorithms to detect and prevent fraud in real-time across millions of transactions...",
"source_name": "google_search"
},
{
"url": "https://stripe.com/jobs/listing/machine-learning-engineer-fraud/5678",
"title": "Machine Learning Engineer - Fraud Detection",
"snippet": "Build and deploy ML models for real-time fraud detection using TensorFlow and Python. Work on Stripe Radar...",
"source_name": "jobs_search"
},
{
"url": "https://newsapi.org/stripe-announces-100m-investment",
"title": "Stripe Announces $100M Investment in AI Fraud Prevention",
"snippet": "Payment processor Stripe today announced a major investment in artificial intelligence capabilities for fraud detection...",
"source_name": "news_search"
}
],
"signals_found": 8
}
},
{
"domain": "paypal.com",
"confidence_score": 0.85,
"evidence_sources": 2,
"findings": {
"technologies": [
"deep-learning",
"java",
"scala",
"risk-management"
],
"evidence": [
{
"url": "https://www.paypal.com/us/webapps/mpp/security/fraud-protection",
"title": "PayPal Fraud Protection - Advanced Security",
"snippet": "Our advanced AI and machine learning systems monitor transactions 24/7 to detect and prevent fraudulent activity...",
"source_name": "google_search"
},
{
"url": "https://newsapi.org/paypal-fraud-detection-ai",
"title": "PayPal Enhances Fraud Detection with Deep Learning",
"snippet": "PayPal has deployed new deep learning models that reduce false positives by 30% while catching more fraud...",
"source_name": "news_search"
}
],
"signals_found": 6
}
}
],
"search_performance": {
"queries_per_second": 18.5,
"failed_requests": 2
}
}
{
"detail": [
{
"loc": ["body", "confidence_threshold"],
"msg": "ensure this value is less than or equal to 1.0",
"type": "value_error.number.not_le"
}
]
}
{
"detail": "Failed to generate search strategies: API key invalid"
}
Tune max_parallel_searches based on your API quotas:
{
"max_parallel_searches": 10,
"search_depth": "quick"
}
Adjust confidence_threshold to reduce noise:
// High precision: only strong matches survive (fewer false positives).
const strictSearch = {
  confidence_threshold: 0.85,
  // ...remaining request fields
};

// High recall: surface more potential matches (accepts more noise).
const broadSearch = {
  confidence_threshold: 0.6,
  // ...remaining request fields
};
import asyncio

import aiohttp

# Companies sent per request; keeps each call within server limits.
BATCH_SIZE = 50


async def research_batch(domains, research_goal):
    """Yield one /research/batch response dict per batch of BATCH_SIZE domains.

    Args:
        domains: Iterable of company domain strings (indexable, e.g. a list).
        research_goal: Natural-language goal passed through to the API.

    Yields:
        Parsed JSON response for each batch, in submission order.

    Raises:
        aiohttp.ClientResponseError: If the API returns an error status.
    """
    async with aiohttp.ClientSession() as session:
        for i in range(0, len(domains), BATCH_SIZE):
            batch = domains[i:i + BATCH_SIZE]
            payload = {
                'research_goal': research_goal,
                'company_domains': batch,
                'search_depth': 'standard',
                'max_parallel_searches': 20,
                'confidence_threshold': 0.7,
            }
            async with session.post(
                'http://localhost:8000/research/batch',
                json=payload
            ) as resp:
                # Fail loudly on HTTP errors instead of yielding an error body.
                resp.raise_for_status()
                data = await resp.json()
                yield data


async def main():
    # Process 500 companies in batches of 50.
    domains = [f"company{i}.com" for i in range(500)]
    async for result in research_batch(domains, "Find AI startups"):
        print(f"Batch complete: {len(result['results'])} companies")


if __name__ == '__main__':
    # `async for` is only valid inside an async function — a bare top-level
    # loop is a SyntaxError — so drive the generator via asyncio.run().
    asyncio.run(main())
def filter_high_confidence(response, min_confidence=0.8):
    """Return only the results whose confidence_score is >= min_confidence.

    Args:
        response: Parsed /research/batch response dict with a 'results' list.
        min_confidence: Inclusive lower bound on confidence_score.

    Returns:
        List of result dicts that meet the threshold, in original order.
    """
    passing = []
    for candidate in response['results']:
        if candidate['confidence_score'] >= min_confidence:
            passing.append(candidate)
    return passing
# `response` is the HTTP response from POST /research/batch (e.g. via `requests`).
data = response.json()
high_confidence = filter_high_confidence(data, min_confidence=0.85)
print(f"Found {len(high_confidence)} high-confidence matches")
/**
 * Collect the unique technology names across every company result.
 * @param {{results: Array<{findings: {technologies: string[]}}>}} response
 *   Parsed /research/batch response body.
 * @returns {string[]} Deduplicated technology names, in first-seen order.
 */
function extractAllTechnologies(response) {
  const unique = new Set(
    response.results.flatMap((result) => result.findings.technologies)
  );
  return [...unique];
}
// `data` is the parsed JSON body of a POST /research/batch response.
const technologies = extractAllTechnologies(data);
console.log('Technologies found:', technologies);
// Output: ['tensorflow', 'python', 'kubernetes', 'java', ...]
import csv
def export_to_csv(response, filename='research_results.csv'):
    """Export research results to CSV, one row per company.

    Args:
        response: Parsed /research/batch response dict with a 'results' list.
        filename: Path of the CSV file to (over)write.
    """
    # encoding='utf-8' explicitly: the platform default (e.g. cp1252 on
    # Windows) can raise UnicodeEncodeError on non-ASCII technology names.
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open(filename, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow([
            'Domain',
            'Confidence',
            'Evidence Sources',
            'Technologies',
            'Signals Found'
        ])
        for result in response['results']:
            writer.writerow([
                result['domain'],
                result['confidence_score'],
                result['evidence_sources'],
                ', '.join(result['findings']['technologies']),
                result['findings']['signals_found']
            ])
# `data` is the parsed /research/batch response dict from an earlier request.
export_to_csv(data)