Optimize your monitoring implementation for minimal overhead and maximum reliability
SEER monitoring is designed to stay out of your script's critical path. Follow these guidelines to keep overhead low and reporting reliable:
import threading

from seerpy import SEER

seer = SEER(api_key="your_api_key")


def send_to_seer(status, **kwargs):
    """Send monitoring data to SEER in a background daemon thread.

    Args:
        status: Name of the SEER client method to call
            (e.g. ``'success'``, ``'error'``, ``'heartbeat'``).
        **kwargs: Keyword arguments forwarded to that method.

    Returns:
        The started ``threading.Thread`` so callers that need delivery
        guarantees can ``join()`` it before interpreter shutdown;
        fire-and-forget callers may simply ignore it.
    """
    def _report():
        try:
            getattr(seer, status)(**kwargs)
        except Exception:
            # Monitoring must never crash or pollute the monitored
            # script's output: a failed API call is dropped silently
            # instead of printing an unhandled-thread traceback.
            pass

    thread = threading.Thread(target=_report, daemon=True)
    thread.start()
    return thread


# Usage - doesn't block main thread
try:
    results = process_data()
    send_to_seer('success', metadata={"count": len(results)})
except Exception as e:
    send_to_seer('error', error_message=str(e))
# Script continues immediately without waiting for API call

import time
from seerpy import SEER

seer = SEER(api_key="your_api_key")

HEARTBEAT_INTERVAL = 300  # 5 minutes

# time.monotonic() is immune to wall-clock adjustments (NTP, DST),
# so the measured interval can never go negative or jump forward.
last_heartbeat = time.monotonic()

for item in large_dataset:
    # Process item
    process(item)

    # Send heartbeat if 5 minutes have passed
    if time.monotonic() - last_heartbeat > HEARTBEAT_INTERVAL:
        seer.heartbeat("pipeline-id")
        last_heartbeat = time.monotonic()

seer.success()

import requests
# Configure timeouts for API calls
CONNECT_TIMEOUT = 5   # seconds to establish connection
READ_TIMEOUT = 10     # seconds to receive response

try:
    response = requests.post(
        "https://api.seer.ansrstudio.com/monitoring",
        headers={"Authorization": api_key},
        json=payload,
        timeout=(CONNECT_TIMEOUT, READ_TIMEOUT),
    )
except requests.exceptions.Timeout:
    # Handle timeout gracefully: persist the payload for a later retry
    # instead of losing the monitoring event.
    print("SEER API timeout, saving locally")
    save_offline(payload)


def smart_truncate_logs(logs, max_size=50000):
    """Keep the beginning and end of logs, truncating the middle.

    Useful for large log files where the head (startup context) and the
    tail (the failure itself) matter most.

    Args:
        logs: Full log text.
        max_size: Character budget that triggers truncation.

    Returns:
        ``logs`` unchanged when it fits within ``max_size``; otherwise
        the first and last ``max_size // 2`` characters joined by a
        ``[TRUNCATED]`` marker.

    NOTE(review): a truncated result is slightly longer than
    ``max_size`` (the marker is not counted against the budget) —
    intentional here, since ``max_size`` is only an approximate cap.
    """
    if len(logs) <= max_size:
        return logs
    keep_size = max_size // 2
    return (
        logs[:keep_size]
        + "\n\n... [TRUNCATED] ...\n\n"
        + logs[-keep_size:]
    )
# Usage
seer.error(
error_message=str(e),
logs=smart_truncate_logs(all_logs)
)SEER has a rate limit of 100 requests per minute across all API endpoints. This is sufficient for most use cases.
Note: If you consistently hit rate limits, consider consolidating multiple small scripts into larger jobs or contact support for higher limits.