This guide covers comprehensive error handling strategies when using the ViewAI Python SDK.
Overview
The ViewAI SDK provides a structured exception hierarchy to help you handle errors gracefully. Understanding these exceptions and implementing proper error handling ensures robust, production-ready applications.
Exception Hierarchy
The ViewAI SDK uses a hierarchical exception structure:
# Example: handling authentication failures when creating a client.
from viewai_client.exceptions import AuthenticationError

try:
    client = ViewAIClient(api_key="invalid-key")
    workspace = client.retrieve_default_workspace()
except AuthenticationError as e:
    print(f"Authentication failed: {e.message}")
    # Solution: Check your API key
    print("Solution: Verify your API key is correct")
# Check API key format
def validate_api_key(api_key):
    """Check that an API key is present and plausibly formatted.

    Args:
        api_key: The key string to validate.

    Raises:
        ValueError: If the key is empty/None or shorter than 32 characters.

    Returns:
        True when the key passes both checks.
    """
    if not api_key:
        raise ValueError("API key is required")
    key_length = len(api_key)
    # Real keys are at least 32 characters; anything shorter is malformed.
    if key_length < 32:
        raise ValueError("API key appears to be invalid (too short)")
    return True
# Use validation before constructing the client so bad keys fail fast.
try:
    validate_api_key(api_key)
    client = ViewAIClient(api_key=api_key)
except ValueError as e:
    print(f"API key validation error: {e}")
# Example: ValidationError is raised when prediction input is incomplete.
from viewai_client.exceptions import ValidationError

try:
    # Missing required features
    result = client.execute_single_point_prediction(
        data={"age": 35},  # Missing other required features
        model_id="model-123"
    )
except ValidationError as e:
    print(f"Validation error: {e.message}")
    print(f"Details: {e.details}")
# Solution: Include all required features
def validate_prediction_data(data, required_features, feature_types):
    """Validate prediction data before submission.

    Args:
        data: Mapping of feature name -> value to send for prediction.
        required_features: Iterable of feature names that must be present.
        feature_types: Mapping of feature name -> expected Python type.

    Raises:
        ValidationError: If any feature is missing or has the wrong type.

    Returns:
        True when the data passes all checks.
    """
    problems = []

    # Every required feature must appear in the payload.
    absent = [name for name in required_features if name not in data]
    if absent:
        problems.append(f"Missing features: {absent}")

    # Features that are present must match their declared types.
    for name, expected in feature_types.items():
        if name not in data:
            continue
        value = data[name]
        if not isinstance(value, expected):
            problems.append(
                f"Feature {name}: expected {expected}, "
                f"got {type(value)}"
            )

    if problems:
        raise ValidationError(
            "Data validation failed",
            details={"errors": problems},
        )
    return True
# Use validation
required_features = ["age", "income", "credit_score"]
feature_types = {"age": int, "income": float, "credit_score": int}

try:
    validate_prediction_data(data, required_features, feature_types)
    result = client.execute_single_point_prediction(data, "model-123")
except ValidationError as e:
    print(f"Validation failed: {e.message}")
    # Each entry describes one missing or mistyped feature.
    for error in e.details.get("errors", []):
        print(f" - {error}")
# Example: APIError carries the HTTP status code and raw response.
from viewai_client.exceptions import APIError

try:
    result = client.execute_prediction(data, model_id="invalid-id")
except APIError as e:
    print(f"API error: {e.message}")
    print(f"Status code: {e.status_code}")
    print(f"Response: {e.response}")

    # Handle based on status code
    if e.status_code == 404:
        print("Model not found")
    elif e.status_code == 429:
        print("Rate limit exceeded")
    elif e.status_code >= 500:
        print("Server error - retry later")
def handle_api_error(error):
    """Map an APIError's HTTP status code to a suggested remedy.

    Args:
        error: Exception-like object exposing a ``status_code`` attribute.

    Returns:
        A human-readable suggestion string.
    """
    status_code = error.status_code
    # Known client-side codes map directly to a fixed remedy.
    known_remedies = {
        400: "Check your input data format",
        401: "Verify your API key is correct",
        404: "Check that model/workspace exists",
        429: "Wait before retrying (rate limited)",
    }
    if status_code in known_remedies:
        return known_remedies[status_code]
    if status_code >= 500:
        return "Server error - retry with exponential backoff"
    return f"Unknown error (status {status_code})"
# Use error handler
try:
    result = client.execute_prediction(data, "model-123")
except APIError as e:
    solution = handle_api_error(e)
    print(f"Error: {e.message}")
    print(f"Solution: {solution}")
# Example: NetworkError signals connectivity problems (timeouts, DNS, etc.).
from viewai_client.exceptions import NetworkError

try:
    job = client.execute_batch_prediction(
        data=large_df,
        model_id="model-123"
    )
except NetworkError as e:
    print(f"Network error: {e.message}")
    # Solution: Check internet connection and retry
import time
from requests.exceptions import RequestException
def retry_on_network_error(func, max_retries=3, backoff_factor=2):
    """Call ``func``, retrying on NetworkError with exponential backoff.

    Args:
        func: Zero-argument callable to invoke.
        max_retries: Total number of attempts before giving up.
        backoff_factor: Base of the exponential wait (backoff_factor ** attempt).

    Returns:
        Whatever ``func`` returns on the first successful attempt.

    Raises:
        NetworkError: Re-raised unchanged once all attempts are exhausted.
    """
    last_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            return func()
        except NetworkError:
            if attempt == last_attempt:
                # Out of attempts: surface the original error.
                raise
            wait_time = backoff_factor ** attempt
            print(f"Network error, retrying in {wait_time}s...")
            time.sleep(wait_time)
# Use retry wrapper
def make_prediction():
    return client.execute_prediction(data, "model-123")

try:
    result = retry_on_network_error(make_prediction)
except NetworkError as e:
    # All retries were exhausted.
    print(f"Network error after retries: {e.message}")
# Example: ConfigurationError is raised for invalid client settings.
from viewai_client.exceptions import ConfigurationError
from viewai_client.config import ClientConfiguration

try:
    config = ClientConfiguration(
        api_key="",  # Empty API key
        timeout=-1  # Invalid timeout
    )
    client = ViewAIClient(config=config)
except ConfigurationError as e:
    print(f"Configuration error: {e.message}")
def validate_configuration(config_dict):
    """Validate client configuration values before building a client.

    Args:
        config_dict: Mapping with optional keys ``api_key``, ``timeout``,
            and ``max_retries``.

    Raises:
        ConfigurationError: If any value is missing or out of range.

    Returns:
        True when the configuration is acceptable.
    """
    problems = []

    # An API key is mandatory.
    if not config_dict.get("api_key"):
        problems.append("API key is required")

    # Timeout defaults to 30 seconds and must be strictly positive.
    if config_dict.get("timeout", 30) <= 0:
        problems.append("Timeout must be positive")

    # Retry count defaults to 3 and may not be negative.
    if config_dict.get("max_retries", 3) < 0:
        problems.append("Max retries cannot be negative")

    if problems:
        raise ConfigurationError(
            "Invalid configuration",
            details={"errors": problems},
        )
    return True
# Use validation
try:
    validate_configuration({"api_key": api_key, "timeout": 30})
    client = ViewAIClient(api_key=api_key, timeout=30)
except ConfigurationError as e:
    print(f"Configuration error: {e.message}")
# Example: TimeoutError when an operation exceeds the configured timeout.
from viewai_client.exceptions import TimeoutError

try:
    # Operation with timeout
    result = client.execute_prediction(data, "model-123")
except TimeoutError as e:
    print(f"Operation timed out: {e.message}")
    # Solution: Increase timeout or use async operations

# Example: ViewAIError is the SDK base class, so it catches any SDK error.
from viewai_client.exceptions import ViewAIError

try:
    result = client.execute_prediction(data, "model-123")
    if result:
        print(f"Prediction: {result.get_probabilities()}")
    else:
        print("Prediction returned None")
except ViewAIError as e:
    print(f"Error: {e.message}")
# Example: catch specific exception types first, most general last.
from viewai_client.exceptions import (
    AuthenticationError,
    ValidationError,
    APIError,
    NetworkError
)

try:
    result = client.execute_prediction(data, "model-123")
except AuthenticationError as e:
    print(f"Authentication failed: {e.message}")
    # Re-authenticate or request new API key
    handle_authentication_failure()
except ValidationError as e:
    print(f"Data validation failed: {e.message}")
    # Fix data and retry
    fixed_data = fix_validation_errors(data, e.details)
    result = client.execute_prediction(fixed_data, "model-123")
except APIError as e:
    print(f"API error: {e.message} (status: {e.status_code})")
    # Handle based on status code
    if e.status_code >= 500:
        # Server error - retry
        time.sleep(5)
        result = client.execute_prediction(data, "model-123")
except NetworkError as e:
    print(f"Network error: {e.message}")
    # Retry with exponential backoff
    result = retry_with_backoff(
        lambda: client.execute_prediction(data, "model-123")
    )
except Exception as e:
    print(f"Unexpected error: {e}")
    # Log and notify
    log_unexpected_error(e)
class ErrorHandlingClient:
    """Client wrapper with error handling.

    Used as a context manager: SDK (ViewAIError) exceptions raised inside
    the ``with`` body are logged and suppressed; any other exception
    propagates normally.
    """

    def __init__(self, api_key):
        # The wrapped SDK client that __enter__ hands to the with-body.
        self.client = ViewAIClient(api_key=api_key)

    def __enter__(self):
        return self.client

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Handle exceptions and cleanup.
        # NOTE(review): log_error is assumed to be defined elsewhere in the
        # application — confirm it is in scope where this class is used.
        if exc_type is None:
            return True  # No exception
        if issubclass(exc_type, ViewAIError):
            print(f"ViewAI error occurred: {exc_val.message}")
            # Log error
            log_error(exc_val)
            return True  # Suppress exception
        return False  # Don't suppress other exceptions
# Usage
with ErrorHandlingClient("your-api-key") as client:
    result = client.execute_prediction(data, "model-123")
    # Errors are logged but don't crash the program
from functools import wraps
def handle_viewai_errors(func):
    """Decorator that turns ViewAI errors into a logged ``None`` result.

    Unexpected exception types are logged and re-raised so genuine bugs
    still surface to the caller.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ViewAIError as e:
            # SDK failures are reported and swallowed.
            print(f"ViewAI error in {func.__name__}: {e.message}")
            return None
        except Exception as e:
            print(f"Unexpected error in {func.__name__}: {e}")
            raise
    return inner
# Use decorator
@handle_viewai_errors
def make_prediction(client, data, model_id):
    """Make prediction with error handling."""
    return client.execute_prediction(data, model_id)

# Errors are handled automatically
result = make_prediction(client, data, "model-123")
import time
from typing import Callable, Any
def exponential_backoff_retry(
    func: Callable,
    max_retries: int = 3,
    base_delay: float = 1.0,
    max_delay: float = 60.0,
    exceptions: tuple = (NetworkError, APIError)
) -> Any:
    """Retry ``func`` with capped exponential backoff.

    Args:
        func: Zero-argument callable to invoke.
        max_retries: Total number of attempts.
        base_delay: Delay before the first retry, in seconds.
        max_delay: Upper bound on any single delay.
        exceptions: Exception types that trigger a retry.

    Returns:
        The first successful result of ``func``.

    Raises:
        The last caught exception once all attempts are exhausted.
    """
    final_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            return func()
        except exceptions as e:
            if attempt == final_attempt:
                # Last attempt failed
                raise
            # Delay doubles each attempt but never exceeds max_delay.
            delay = min(base_delay * (2 ** attempt), max_delay)
            print(f"Attempt {attempt + 1} failed: {e}")
            print(f"Retrying in {delay:.1f}s...")
            time.sleep(delay)
# Use exponential backoff
def make_prediction():
    return client.execute_prediction(data, "model-123")

try:
    result = exponential_backoff_retry(
        make_prediction,
        max_retries=5,
        base_delay=1.0
    )
except Exception as e:
    print(f"Failed after retries: {e}")
import random
def retry_with_jitter(
    func: Callable,
    max_retries: int = 3,
    base_delay: float = 1.0
) -> Any:
    """Retry ``func`` with jittered exponential backoff.

    Jitter spreads retries from many clients over time so they do not
    hammer the service in synchronized waves.
    """
    final_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            return func()
        except (NetworkError, APIError):
            if attempt == final_attempt:
                raise
            # Exponential delay scaled by a random factor in [0.5, 1.5).
            raw_delay = base_delay * (2 ** attempt)
            jittered_delay = raw_delay * (0.5 + random.random())
            print(f"Retrying in {jittered_delay:.1f}s...")
            time.sleep(jittered_delay)
# Use jittered retry
result = retry_with_jitter(
    lambda: client.execute_prediction(data, "model-123")
)
from viewai_client.config import RetryStrategy
class RetryConfig:
    """Tunable parameters for exponential-backoff retry loops."""

    def __init__(
        self,
        max_attempts: int = 3,
        base_delay: float = 1.0,
        max_delay: float = 60.0,
        exponential_base: float = 2.0,
        jitter: bool = True
    ):
        # Total attempts (first call plus retries).
        self.max_attempts = max_attempts
        # Delay before the first retry, in seconds.
        self.base_delay = base_delay
        # Upper bound on any single delay.
        self.max_delay = max_delay
        # Growth factor applied per attempt.
        self.exponential_base = exponential_base
        # Whether to randomize delays to avoid thundering herds.
        self.jitter = jitter

    def calculate_delay(self, attempt: int) -> float:
        """Return the wait (seconds) before retrying the given attempt."""
        raw = self.base_delay * (self.exponential_base ** attempt)
        capped = min(raw, self.max_delay)
        if self.jitter:
            # Scale by a random factor in [0.5, 1.5).
            capped *= (0.5 + random.random())
        return capped
def retry_with_config(func: Callable, config: RetryConfig) -> Any:
    """Retry ``func`` using delays computed from ``config``.

    Args:
        func: Zero-argument callable to invoke.
        config: A RetryConfig supplying attempt count and delay policy.

    Returns:
        The first successful result of ``func``.

    Raises:
        The last caught exception once all attempts are exhausted.
    """
    last_attempt = config.max_attempts - 1
    for attempt in range(config.max_attempts):
        try:
            return func()
        except (NetworkError, APIError):
            if attempt == last_attempt:
                raise
            delay = config.calculate_delay(attempt)
            print(f"Retrying in {delay:.1f}s...")
            time.sleep(delay)
# Use custom retry config
retry_config = RetryConfig(
    max_attempts=5,
    base_delay=2.0,
    max_delay=30.0,
    jitter=True
)

result = retry_with_config(
    lambda: client.execute_prediction(data, "model-123"),
    retry_config
)
import logging

# Configure root logging to write both to a file and to the console.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('viewai.log'),
        logging.StreamHandler()
    ]
)

# Set ViewAI client log level
client = ViewAIClient(api_key="your-api-key")
client.configure_logging_level(logging.DEBUG)

# Now all SDK operations are logged
result = client.execute_prediction(data, "model-123")
import logging
from datetime import datetime
class ErrorLogger:
    """Custom error logger for ViewAI operations.

    Writes one JSON document per error to ``log_file`` so entries can be
    parsed and analyzed later.
    """

    def __init__(self, log_file="errors.log"):
        self.logger = logging.getLogger("ViewAIErrors")
        self.logger.setLevel(logging.ERROR)
        # Fix: logging.getLogger returns a shared logger object, so
        # constructing ErrorLogger twice previously attached a second
        # FileHandler and duplicated every log line. Only attach once.
        if not self.logger.handlers:
            fh = logging.FileHandler(log_file)
            fh.setLevel(logging.ERROR)
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
            fh.setFormatter(formatter)
            self.logger.addHandler(fh)

    def log_error(self, error, context=None):
        """Log ``error`` as a JSON record, enriched with optional context.

        Args:
            error: The exception instance to record.
            context: Optional dict of extra fields (operation name, ids, ...).
        """
        # Fix: json was used here but never imported anywhere in this file,
        # which raised NameError on the first logged error.
        import json

        error_info = {
            "timestamp": datetime.now().isoformat(),
            "error_type": type(error).__name__,
            "message": str(error),
            "context": context or {}
        }
        # SDK errors carry structured details; API errors add HTTP info.
        if isinstance(error, ViewAIError):
            error_info["details"] = error.details
        if isinstance(error, APIError):
            error_info["status_code"] = error.status_code
            error_info["response"] = error.response
        self.logger.error(json.dumps(error_info))
# Use error logger
error_logger = ErrorLogger("viewai_errors.log")

try:
    result = client.execute_prediction(data, "model-123")
except ViewAIError as e:
    # Attach operation metadata so log entries can be traced back.
    error_logger.log_error(
        e,
        context={"operation": "prediction", "model_id": "model-123"}
    )
class DebugClient:
    """ViewAI client wrapper that prints verbose diagnostics when enabled."""

    def __init__(self, api_key, debug=False):
        self.client = ViewAIClient(api_key=api_key)
        # When True, every call echoes its inputs, outputs and failures.
        self.debug = debug
        if debug:
            self.client.configure_logging_level(logging.DEBUG)

    def execute_prediction(self, data, model_id):
        """Run a prediction, echoing request/response details in debug mode."""
        if self.debug:
            print(f"[DEBUG] Executing prediction")
            print(f"[DEBUG] Model ID: {model_id}")
            print(f"[DEBUG] Data: {data}")
        try:
            outcome = self.client.execute_prediction(data, model_id)
            if self.debug:
                print(f"[DEBUG] Prediction successful")
                print(f"[DEBUG] Result: {outcome}")
            return outcome
        except Exception as exc:
            if self.debug:
                print(f"[DEBUG] Prediction failed: {exc}")
                import traceback
                traceback.print_exc()
            # Always re-raise: debug mode only adds output, never swallows.
            raise
# Use debug client
debug_client = DebugClient("your-api-key", debug=True)
result = debug_client.execute_prediction(data, "model-123")
import logging
from typing import Optional, Dict, Any
class ProductionErrorHandler:
    """Production-grade error handler.

    Wraps SDK calls with per-exception-type handling: authentication and
    unexpected errors trigger alerts, validation failures are logged for
    review, and transient API/network errors are retried with backoff.
    """

    def __init__(self, client: ViewAIClient, alert_webhook: Optional[str] = None):
        self.client = client
        # Optional webhook URL (Slack, PagerDuty, ...) for critical alerts.
        self.alert_webhook = alert_webhook
        self.logger = logging.getLogger("ViewAI.Production")

    def execute_with_handling(
        self,
        func: Callable,
        operation_name: str,
        context: Dict[str, Any] = None
    ) -> Optional[Any]:
        """Execute function with comprehensive error handling.

        Args:
            func: Zero-argument callable performing the SDK operation.
            operation_name: Human-readable name used in logs and alerts.
            context: Optional extra fields attached to logs/alerts.

        Returns:
            The operation's result, a retried result for transient errors,
            or None when the error is not recoverable here.
        """
        context = context or {}
        try:
            return func()
        except AuthenticationError as e:
            self._handle_auth_error(e, operation_name, context)
            return None
        except ValidationError as e:
            self._handle_validation_error(e, operation_name, context)
            return None
        except APIError as e:
            # May retry internally for 5xx responses.
            return self._handle_api_error(e, operation_name, context, func)
        except NetworkError as e:
            # Always retried with backoff.
            return self._handle_network_error(e, operation_name, context, func)
        except Exception as e:
            self._handle_unexpected_error(e, operation_name, context)
            return None

    def _handle_auth_error(self, error, operation, context):
        """Handle authentication errors."""
        self.logger.critical(
            f"Authentication failed in {operation}: {error.message}"
        )
        # Alert ops team
        self._send_alert("CRITICAL: Authentication failed", error, context)

    def _handle_validation_error(self, error, operation, context):
        """Handle validation errors."""
        self.logger.error(
            f"Validation failed in {operation}: {error.message}"
        )
        # Log for data team to review
        self._log_validation_failure(error, context)

    def _handle_api_error(self, error, operation, context, retry_func):
        """Handle API errors with retry logic."""
        self.logger.error(
            f"API error in {operation}: {error.message} "
            f"(status: {error.status_code})"
        )
        # Retry for server errors
        if error.status_code >= 500:
            self.logger.info(f"Retrying {operation} due to server error")
            return exponential_backoff_retry(retry_func, max_retries=3)
        # Client errors (4xx) are not retried.
        return None

    def _handle_network_error(self, error, operation, context, retry_func):
        """Handle network errors with retry."""
        self.logger.warning(
            f"Network error in {operation}: {error.message}"
        )
        # Retry network errors
        return exponential_backoff_retry(retry_func, max_retries=5)

    def _handle_unexpected_error(self, error, operation, context):
        """Handle unexpected errors."""
        # logger.exception records the full traceback alongside the message.
        self.logger.exception(
            f"Unexpected error in {operation}: {error}"
        )
        # Alert ops team
        self._send_alert("CRITICAL: Unexpected error", error, context)

    def _send_alert(self, title, error, context):
        """Send alert to monitoring system."""
        if self.alert_webhook:
            # Send to webhook (Slack, PagerDuty, etc.)
            pass

    def _log_validation_failure(self, error, context):
        """Log validation failure for analysis."""
        # Save to database for data team review
        pass
# Use production error handler
handler = ProductionErrorHandler(
    client=client,
    alert_webhook="https://hooks.slack.com/..."
)

result = handler.execute_with_handling(
    func=lambda: client.execute_prediction(data, "model-123"),
    operation_name="customer_churn_prediction",
    context={"customer_id": "12345"}
)
# Pattern: bare call with no error handling (fragile — any SDK error propagates).
result = client.execute_prediction(data, "model-123")

# Pattern: catch the SDK base class and fall back to None.
try:
    result = client.execute_prediction(data, "model-123")
except ViewAIError as e:
    print(f"Error: {e.message}")
    result = None

# Pattern: broad Exception catch (hides the error type; prefer specific classes).
try:
    result = client.execute_prediction(data, "model-123")
except Exception as e:
    print(f"Error: {e}")

# Pattern: specific handlers first, base ViewAIError as the fallback.
try:
    result = client.execute_prediction(data, "model-123")
except ValidationError as e:
    print(f"Validation error: {e.message}")
except APIError as e:
    print(f"API error (status {e.status_code}): {e.message}")
except ViewAIError as e:
    print(f"ViewAI error: {e.message}")
def get_user_friendly_message(error):
    """Translate an SDK exception into a message safe to show end users."""
    # Order matters: specific subclasses are checked before the fallback.
    if isinstance(error, AuthenticationError):
        return "Unable to authenticate. Please check your credentials."
    if isinstance(error, ValidationError):
        return "Invalid input data. Please check your data and try again."
    if isinstance(error, NetworkError):
        return "Connection error. Please check your internet connection."
    if isinstance(error, APIError):
        status = error.status_code
        if status == 404:
            return "Model not found. Please verify the model ID."
        if status == 429:
            return "Too many requests. Please try again in a few moments."
        if status >= 500:
            return "Service temporarily unavailable. Please try again later."
    # Unrecognized error type or unhandled API status code.
    return "An unexpected error occurred. Please contact support."
# Use user-friendly messages
try:
    result = client.execute_prediction(data, "model-123")
except ViewAIError as e:
    user_message = get_user_friendly_message(e)
    print(user_message)
def predict_with_fallback(client, data, model_id, fallback_value=None):
    """Make prediction with fallback value.

    Args:
        client: SDK client exposing ``execute_prediction``.
        data: Feature payload for the prediction.
        model_id: Identifier of the model to invoke.
        fallback_value: Value returned if the prediction fails.

    Returns:
        The prediction result, or ``fallback_value`` on any ViewAIError.
    """
    try:
        return client.execute_prediction(data, model_id)
    except ViewAIError as e:
        # Fix: the original referenced an undefined module-level ``logger``
        # (NameError on the fallback path); obtain a named logger explicitly.
        logging.getLogger(__name__).error(f"Prediction failed: {e.message}")
        # Return fallback value
        return fallback_value
# Use with fallback
result = predict_with_fallback(
    client,
    data,
    "model-123",
    fallback_value={"prediction": 0, "confidence": 0.0}
)
from collections import defaultdict
from datetime import datetime
class ErrorMetrics:
    """Track error counts and rates across ViewAI operations."""

    def __init__(self):
        # error type name -> number of occurrences
        self.error_counts = defaultdict(int)
        # Total operations attempted (successes AND failures).
        self.total_operations = 0

    def record_error(self, error_type):
        """Record an error occurrence.

        Fix: failed operations now count toward ``total_operations`` too.
        Previously only successes were counted, so error rates were divided
        by the success count (reporting 1.0 at a 50% failure rate, and 0.0
        when every operation failed).
        """
        self.error_counts[error_type] += 1
        self.total_operations += 1

    def record_success(self):
        """Record a successful operation."""
        self.total_operations += 1

    def get_error_rate(self, error_type):
        """Return errors of ``error_type`` divided by total operations."""
        if self.total_operations == 0:
            return 0.0
        return self.error_counts[error_type] / self.total_operations

    def get_summary(self):
        """Return totals, per-type counts, and per-type rates."""
        return {
            "total_operations": self.total_operations,
            "error_counts": dict(self.error_counts),
            "error_rates": {
                error_type: self.get_error_rate(error_type)
                for error_type in self.error_counts
            }
        }
# Use error metrics
metrics = ErrorMetrics()

try:
    result = client.execute_prediction(data, "model-123")
    metrics.record_success()
except ViewAIError as e:
    # Record the exception class name as the error-type key.
    metrics.record_error(type(e).__name__)

# Check metrics
summary = metrics.get_summary()
print(f"Error summary: {summary}")