API Patterns & Best Practices

Pagination

Handle large result sets with pagination:

def get_all_audit_logs(org_id, start_date):
    """Fetch all audit logs for an organization, following pagination.

    Args:
        org_id: Organization identifier to filter logs by.
        start_date: Only logs on or after this date are returned.

    Returns:
        list: All audit log entries accumulated across every page.

    Raises:
        requests.HTTPError: If any page request returns an error status.
    """
    page = 1
    all_logs = []

    while True:
        response = requests.get(
            'https://api.governanceai.com/v1/audit/logs',
            headers=headers,
            params={
                'org_id': org_id,
                'start_date': start_date,
                'page': page,
                'page_size': 100
            }
        )
        # Fail fast on HTTP errors instead of KeyError-ing on the body below.
        response.raise_for_status()

        data = response.json()
        all_logs.extend(data['entries'])

        # Stop once the server reports we are on (or past) the last page.
        if data['pagination']['page'] >= data['pagination']['total_pages']:
            break

        page += 1

    return all_logs

Retry Logic

Implement exponential backoff for retries:

import time

def make_request_with_retry(url, method='POST', data=None, max_retries=3):
    """Make an HTTP request, retrying with exponential backoff.

    Retries on 429 rate-limit responses and on any requests-level error
    (timeouts included), waiting 2**attempt seconds between attempts.

    Args:
        url: Target URL.
        method: 'POST' (default); any other value performs a GET.
        data: JSON-serializable body for POST requests.
        max_retries: Total number of attempts before giving up.

    Returns:
        dict: Decoded JSON response body on success.

    Raises:
        requests.exceptions.RequestException: If the final attempt errors.
        Exception: If every attempt was consumed by 429 responses.
    """
    for attempt in range(max_retries):
        try:
            if method == 'POST':
                response = requests.post(url, headers=headers, json=data, timeout=10)
            else:
                response = requests.get(url, headers=headers, timeout=10)

            if response.status_code == 429:  # Rate limited by the server
                wait_time = 2 ** attempt  # 1, 2, 4 seconds
                print(f"Rate limited. Waiting {wait_time}s...")
                time.sleep(wait_time)
                continue

            response.raise_for_status()
            return response.json()

        # requests.exceptions.Timeout subclasses RequestException, so one
        # handler covers both cases the original duplicated verbatim.
        except requests.exceptions.RequestException:
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)
                continue
            raise

    raise Exception("Max retries exceeded")

Rate Limit Handling

Check rate limit headers:

def handle_rate_limits(response):
    """Extract rate limit information from response headers.

    Args:
        response: Any object with a dict-like ``headers`` attribute
            (e.g. a ``requests.Response``).

    Returns:
        dict: 'remaining', 'limit' and 'reset_time' header values, each
        the string 'unknown' when the corresponding header is absent.
    """
    remaining = response.headers.get('X-RateLimit-Remaining', 'unknown')
    limit = response.headers.get('X-RateLimit-Limit', 'unknown')
    reset_time = response.headers.get('X-RateLimit-Reset', 'unknown')

    print(f"Rate Limit: {remaining}/{limit}")
    print(f"Reset at: {reset_time}")

    # Only compare numerically when the header was actually present;
    # the original called int('unknown') and crashed on missing headers.
    if remaining != 'unknown' and int(remaining) < 10:
        print("Warning: Approaching rate limit")

    return {
        'remaining': remaining,
        'limit': limit,
        'reset_time': reset_time
    }

Batch Operations

Process multiple items efficiently:

def batch_evaluate_guardrails(messages_list, batch_size=10):
    """Evaluate a list of messages against the guardrails endpoint.

    The input is walked in chunks of ``batch_size``; each message is
    still submitted as its own API call, with a short pause after every
    call to stay under rate limits.

    Args:
        messages_list: User message strings to evaluate.
        batch_size: Number of messages per processing chunk.

    Returns:
        list: One decoded evaluation result per input message, in order.
    """
    results = []

    # Generate fixed-size chunks of the input.
    chunks = (
        messages_list[start:start + batch_size]
        for start in range(0, len(messages_list), batch_size)
    )

    for chunk in chunks:
        for text in chunk:
            payload = {
                'messages': [{'role': 'user', 'content': text}],
                'context': {'org_id': ORG_ID, 'user_id': 'batch_user'}
            }
            evaluation = requests.post(
                'https://api.governanceai.com/v1/guardrails/evaluate',
                headers=headers,
                json=payload
            ).json()
            results.append(evaluation)

            # Brief pause so sequential calls do not trip rate limits.
            time.sleep(0.1)

    return results

Caching Responses

Cache API responses to reduce load:

import json
from datetime import datetime, timedelta

# Module-level cache: maps cache key -> (response payload, time stored).
cache = {}

def get_cached_policy(policy_id, cache_ttl_minutes=60):
    """Get a policy by id, serving from an in-memory TTL cache when fresh.

    Args:
        policy_id: Identifier of the policy to fetch.
        cache_ttl_minutes: How long a cached entry stays valid.

    Returns:
        dict: Decoded policy payload.

    Raises:
        requests.HTTPError: If the API request fails (errors are never cached).
    """
    cache_key = f"policy_{policy_id}"

    # Serve from cache while the entry is still within its TTL.
    if cache_key in cache:
        cached_data, cached_time = cache[cache_key]
        if datetime.now() - cached_time < timedelta(minutes=cache_ttl_minutes):
            return cached_data
        # Expired: drop it so a failed refresh cannot leave stale data behind.
        del cache[cache_key]

    # Cache miss or expired entry: fetch from the API.
    response = requests.get(
        f'https://api.governanceai.com/v1/policies/{policy_id}',
        headers=headers
    )
    # Only cache successful responses; the original cached error bodies
    # returned by .json() and would serve them for the full TTL.
    response.raise_for_status()
    payload = response.json()

    cache[cache_key] = (payload, datetime.now())
    return payload

Webhook Handling

Process webhook events from GovernanceAI:

from flask import Flask, request
import hmac
import hashlib
import os  # required for os.getenv below; missing from the original snippet

app = Flask(__name__)
WEBHOOK_SECRET = os.getenv('GOVERNANCEAI_WEBHOOK_SECRET')

def verify_webhook_signature(payload, signature):
    """Return True iff ``signature`` is the HMAC-SHA256 hex digest of ``payload``.

    Args:
        payload: Raw request body as bytes.
        signature: Hex digest supplied by the sender; may be None when
            the header was absent.
    """
    # Reject outright when no signature was supplied; compare_digest
    # raises TypeError on None, which the original did not guard against.
    if not signature:
        return False

    expected = hmac.new(
        WEBHOOK_SECRET.encode(),
        payload,
        hashlib.sha256
    ).hexdigest()

    # Constant-time comparison to avoid leaking match length via timing.
    return hmac.compare_digest(signature, expected)

@app.route('/webhooks/governanceai', methods=['POST'])
def handle_webhook():
    """Handle a GovernanceAI webhook event after verifying its signature."""
    signature = request.headers.get('X-Signature')
    payload = request.get_data()

    # Never process unauthenticated payloads.
    if not verify_webhook_signature(payload, signature):
        return {'error': 'Invalid signature'}, 401

    event = request.json
    print(f"Received event: {event['type']}")

    # Dispatch on event type; unknown types are acknowledged but ignored.
    if event['type'] == 'vulnerability_detected':
        handle_vulnerability(event['data'])
    elif event['type'] == 'scan_completed':
        handle_scan_complete(event['data'])

    return {'status': 'ok'}, 200

Monitoring & Metrics

Track API usage patterns:

from collections import defaultdict
from datetime import datetime

# endpoint -> list of per-call records: {'timestamp', 'duration_ms', 'status_code'}.
metrics = defaultdict(list)

def track_api_call(endpoint, duration_ms, status_code):
    """Record one API call's latency and status code for ``endpoint``."""
    metrics[endpoint].append({
        'timestamp': datetime.now(),
        'duration_ms': duration_ms,
        'status_code': status_code
    })

def get_metrics_summary(endpoint):
    """Summarize recorded calls for ``endpoint``.

    Returns:
        dict: Total call count plus average, p95 and p99 latency in ms.
        All fields are 0 when no calls have been recorded for the
        endpoint (the original raised ZeroDivisionError here).
    """
    calls = metrics[endpoint]
    if not calls:
        return {
            'total_calls': 0,
            'avg_duration_ms': 0,
            'p95_duration_ms': 0,
            'p99_duration_ms': 0
        }

    # Sort once and reuse for both percentile lookups (original sorted twice).
    durations = sorted(c['duration_ms'] for c in calls)

    return {
        'total_calls': len(calls),
        'avg_duration_ms': sum(durations) / len(durations),
        'p95_duration_ms': durations[int(len(durations) * 0.95)],
        'p99_duration_ms': durations[int(len(durations) * 0.99)]
    }

Best Practices

Do:

  • Use environment variables for credentials
  • Implement retry logic with exponential backoff
  • Cache responses when appropriate
  • Monitor rate limits
  • Verify webhook signatures
  • Handle all error codes
  • Log API calls for debugging
  • Use batch operations for multiple items

Don’t:

  • Hardcode API keys
  • Ignore rate limit headers
  • Make synchronous calls in loops
  • Store sensitive data in logs
  • Skip error handling
  • Make unlimited retries
  • Share API responses publicly

Next Steps