How do I implement retry logic for failed requests in Alamofire?
Implementing retry logic in Alamofire is crucial for building resilient iOS applications that can handle network failures gracefully. Alamofire provides several approaches to implement retry mechanisms, from simple request retries to sophisticated interceptors with custom retry policies.
Understanding Alamofire's Retry Mechanisms
Alamofire offers built-in retry functionality through its interceptor system, which allows you to automatically retry failed requests based on custom conditions. The framework provides the RequestRetrier protocol and a ready-made RetryPolicy class that can be customized to meet your specific requirements.
Basic Retry Implementation
Using the Default Retry Policy
The simplest way to implement retry logic is using Alamofire's built-in RetryPolicy class:
import Alamofire

// Create a session with the default retry policy.
// Note: `Interceptor` has no `retryPolicy:` initializer — `RetryPolicy`
// itself conforms to `RequestInterceptor`, so pass it to `Session` directly.
let session = Session(interceptor: RetryPolicy())

// Make a request with automatic retry.
// `.validate()` is essential: without it, a retryable status code such as
// 500 completes "successfully" and the retry policy is never consulted.
// (`responseJSON` is deprecated since Alamofire 5.5 — prefer
// `responseData` or `responseDecodable(of:)`.)
session.request("https://api.example.com/data")
    .validate()
    .responseData { response in
        switch response.result {
        case .success(let data):
            print("Success: \(data.count) bytes")
        case .failure(let error):
            print("Failed after retries: \(error)")
        }
    }
Custom Retry Policy Configuration
For more control, create a custom retry policy with specific parameters:
import Alamofire

/// A `RetryPolicy` subclass that bakes in project-specific defaults.
final class CustomRetryPolicy: RetryPolicy {
    /// - Parameters:
    ///   - retryLimit: Maximum number of retries per request.
    ///   - exponentialBackoffBase: Base of the backoff exponent
    ///     (delay grows as `scale * base^retryCount`).
    ///   - exponentialBackoffScale: Multiplier applied to the backoff term, in seconds.
    ///   - retryableHTTPMethods: Methods eligible for retry. Alamofire 5's
    ///     `HTTPMethod` constants are lowercase (`.get`, not `.GET`).
    ///     Caution: `.post` is not idempotent — retrying it can duplicate
    ///     server-side work; include it only if your API tolerates that.
    ///   - retryableHTTPStatusCodes: Status codes eligible for retry
    ///     (requires `.validate()` on the request so these surface as errors).
    ///   - retryableURLErrorCodes: Transport-level errors eligible for retry.
    override init(retryLimit: UInt = 3,
                  exponentialBackoffBase: UInt = 2,
                  exponentialBackoffScale: TimeInterval = 0.5,
                  retryableHTTPMethods: Set<HTTPMethod> = [.get, .post, .put, .delete],
                  retryableHTTPStatusCodes: Set<Int> = [408, 429, 500, 502, 503, 504],
                  retryableURLErrorCodes: Set<URLError.Code> = [.timedOut, .networkConnectionLost]) {
        super.init(
            retryLimit: retryLimit,
            exponentialBackoffBase: exponentialBackoffBase,
            exponentialBackoffScale: exponentialBackoffScale,
            retryableHTTPMethods: retryableHTTPMethods,
            retryableHTTPStatusCodes: retryableHTTPStatusCodes,
            retryableURLErrorCodes: retryableURLErrorCodes
        )
    }
}

// Use the custom retry policy. `RetryPolicy` conforms to
// `RequestInterceptor`, so it can be passed to `Session` directly.
let customRetryPolicy = CustomRetryPolicy(retryLimit: 5, exponentialBackoffScale: 1.0)
let session = Session(interceptor: customRetryPolicy)
Advanced Retry Strategies
Implementing Custom Retry Logic
For complex scenarios, implement the RequestRetrier protocol directly:
import Alamofire

/// A `RequestRetrier` with custom error classification and
/// exponential backoff plus additive jitter.
final class AdvancedRetryPolicy: RequestRetrier {
    private let maxRetries: Int
    private let retryDelay: TimeInterval

    /// - Parameters:
    ///   - maxRetries: Maximum number of retries per request.
    ///   - retryDelay: Base delay (seconds) before the first retry;
    ///     doubles on each subsequent attempt.
    init(maxRetries: Int = 3, retryDelay: TimeInterval = 2.0) {
        self.maxRetries = maxRetries
        self.retryDelay = retryDelay
    }

    func retry(_ request: Request,
               for session: Session,
               dueTo error: Error,
               completion: @escaping (RetryResult) -> Void) {
        // Give up once the per-request retry budget is exhausted.
        guard request.retryCount < maxRetries else {
            completion(.doNotRetry)
            return
        }
        if shouldRetry(for: error) {
            completion(.retryWithDelay(calculateDelay(for: request.retryCount)))
        } else {
            completion(.doNotRetry)
        }
    }

    /// Decides whether an error looks transient enough to retry.
    private func shouldRetry(for error: Error) -> Bool {
        guard let afError = error.asAFError else { return false }

        // Check the HTTP status code first. With `.validate()`, a retryable
        // status surfaces as `.responseValidationFailed`, which carries the
        // code in `responseCode`. This check must happen before the switch,
        // otherwise the validation-failure branch below swallows it.
        if let statusCode = afError.responseCode {
            return [408, 429, 500, 502, 503, 504].contains(statusCode)
        }

        switch afError {
        case .sessionTaskFailed(let urlError as URLError):
            // Transport-level failures that are usually transient.
            return [.timedOut, .networkConnectionLost, .notConnectedToInternet]
                .contains(urlError.code)
        case .responseSerializationFailed:
            // A malformed body will not improve on retry.
            return false
        default:
            return true
        }
    }

    /// Exponential backoff with up to 30% additive jitter, which spreads
    /// out retries from many clients (avoids the thundering-herd problem).
    private func calculateDelay(for retryCount: Int) -> TimeInterval {
        let baseDelay = retryDelay * pow(2.0, Double(retryCount))
        let jitter = Double.random(in: 0...0.3) * baseDelay
        return baseDelay + jitter
    }
}

// Plain retriers are wrapped via `Interceptor(retriers:)`
// (there is no single-argument `Interceptor(retrier:)` initializer).
let advancedRetrier = AdvancedRetryPolicy(maxRetries: 5, retryDelay: 1.0)
let session = Session(interceptor: Interceptor(retriers: [advancedRetrier]))
Conditional Retry Based on Response
Sometimes you need to retry based on the response content rather than just HTTP status codes:
/// Retries based on application-level error codes embedded in an
/// otherwise successful (HTTP 200) response body.
final class ConditionalRetryPolicy: RequestRetrier {
    /// Retry budget applied to BOTH the application-level and the fallback
    /// path — without it, a persistently rate-limited endpoint would be
    /// retried forever.
    private let maxRetries = 3

    func retry(_ request: Request,
               for session: Session,
               dueTo error: Error,
               completion: @escaping (RetryResult) -> Void) {
        // Stop once the retry budget is spent, regardless of the reason.
        guard request.retryCount < maxRetries else {
            completion(.doNotRetry)
            return
        }
        // Response body lives on `DataRequest`, not on the base `Request`
        // type, hence the downcast.
        if let response = request.response,
           let data = (request as? DataRequest)?.data,
           response.statusCode == 200,
           let json = (try? JSONSerialization.jsonObject(with: data)) as? [String: Any],
           let errorCode = json["error_code"] as? String,
           ["RATE_LIMITED", "TEMPORARY_ERROR"].contains(errorCode) {
            // Application-level transient error: back off longer.
            completion(.retryWithDelay(5.0))
            return
        }
        // Fall back to standard retry logic.
        completion(.retryWithDelay(2.0))
    }
}
Implementing Circuit Breaker Pattern
For high-volume applications, consider implementing a circuit breaker pattern to prevent cascading failures:
/// A retrier implementing a simple circuit-breaker: after `failureThreshold`
/// consecutive failures it stops retrying ("open") for `recoveryTimeout`
/// seconds, then allows a single probe request ("half-open") before either
/// recovering or opening again.
///
/// NOTE(review): state (`failureCount`, `circuitState`, `lastFailureTime`) is
/// mutated without synchronization. This is safe only if `retry` and
/// `recordSuccess` are always invoked from the same serial queue — TODO
/// confirm against how the session dispatches retrier callbacks.
///
/// NOTE(review): nothing in this class transitions half-open → closed;
/// callers must invoke `recordSuccess()` on a successful response
/// (e.g. from the response handler) for the breaker to recover.
class CircuitBreakerRetryPolicy: RequestRetrier {
    private var failureCount = 0
    private var lastFailureTime: Date?
    private let failureThreshold = 5
    private let recoveryTimeout: TimeInterval = 30

    enum CircuitState {
        case closed // Normal operation
        case open // Failing fast
        case halfOpen // Testing recovery
    }

    private var circuitState: CircuitState = .closed

    /// Routes the retry decision to the handler for the current circuit state.
    func retry(_ request: Request,
               for session: Session,
               dueTo error: Error,
               completion: @escaping (RetryResult) -> Void) {
        switch circuitState {
        case .closed:
            handleClosedState(request: request, error: error, completion: completion)
        case .open:
            handleOpenState(completion: completion)
        case .halfOpen:
            handleHalfOpenState(request: request, error: error, completion: completion)
        }
    }

    /// Closed: count the failure; trip the breaker at the threshold,
    /// otherwise retry up to 3 times per request.
    private func handleClosedState(request: Request,
                                   error: Error,
                                   completion: @escaping (RetryResult) -> Void) {
        failureCount += 1
        if failureCount >= failureThreshold {
            // Trip the breaker and record when, so handleOpenState can
            // measure the recovery window.
            circuitState = .open
            lastFailureTime = Date()
            completion(.doNotRetry)
        } else if request.retryCount < 3 {
            completion(.retryWithDelay(2.0))
        } else {
            completion(.doNotRetry)
        }
    }

    /// Open: fail fast until `recoveryTimeout` has elapsed, then move to
    /// half-open and let one probe request through immediately.
    private func handleOpenState(completion: @escaping (RetryResult) -> Void) {
        guard let lastFailure = lastFailureTime,
              Date().timeIntervalSince(lastFailure) > recoveryTimeout else {
            completion(.doNotRetry)
            return
        }
        circuitState = .halfOpen
        completion(.retryWithDelay(0))
    }

    /// Half-open: the probe failed (we only reach the retrier on failure),
    /// so re-open the breaker and restart the recovery window.
    private func handleHalfOpenState(request: Request,
                                     error: Error,
                                     completion: @escaping (RetryResult) -> Void) {
        // If request succeeds, the circuit will be closed elsewhere
        // If it fails, open the circuit again
        circuitState = .open
        lastFailureTime = Date()
        completion(.doNotRetry)
    }

    /// Resets the breaker to normal operation. Must be called by response
    /// handlers on success for half-open → closed recovery to happen.
    func recordSuccess() {
        failureCount = 0
        circuitState = .closed
        lastFailureTime = nil
    }
}
Best Practices for Retry Logic
1. Configure Appropriate Timeouts
Always set reasonable timeouts to prevent indefinite waiting:
// Build a session with explicit timeouts so a hung request cannot
// stall the retry pipeline indefinitely.
let session = Session(configuration: {
    let config = URLSessionConfiguration.default
    config.timeoutIntervalForRequest = 30   // per-attempt timeout (seconds)
    config.timeoutIntervalForResource = 60  // whole-transfer timeout (seconds)
    return config
}())

session.request("https://api.example.com/data")
    .validate()
    .responseData { response in
        // Handle response. (`responseJSON` is deprecated since
        // Alamofire 5.5 — use `responseData` or `responseDecodable(of:)`.)
    }
2. Implement Exponential Backoff with Jitter
Prevent thundering herd problems by adding randomization to your backoff strategy:
/// Computes a retry delay that grows exponentially with the attempt number,
/// scaled by multiplicative jitter in [0.5, 1.5] to spread simultaneous
/// retries apart, and capped at 60 seconds.
private func calculateBackoffDelay(attempt: Int, baseDelay: TimeInterval = 1.0) -> TimeInterval {
    let uncapped = baseDelay * pow(2.0, Double(attempt))
    let jittered = uncapped * Double.random(in: 0.5...1.5)
    return min(jittered, 60.0)
}
3. Monitor and Log Retry Attempts
Implement comprehensive logging to track retry behavior:
/// A retrier that logs every retry decision before delegating to the
/// shared exponential-backoff calculation.
class LoggingRetryPolicy: RequestRetrier {
    func retry(_ request: Request,
               for session: Session,
               dueTo error: Error,
               completion: @escaping (RetryResult) -> Void) {
        let attempt = request.retryCount
        let requestURL = request.request?.url?.absoluteString ?? "Unknown URL"
        print("Retry attempt \(attempt + 1) for \(requestURL)")
        print("Error: \(error.localizedDescription)")

        // Stop after three attempts; otherwise schedule a backed-off retry.
        guard attempt < 3 else {
            print("Max retries reached for \(requestURL)")
            completion(.doNotRetry)
            return
        }
        let backoff = calculateBackoffDelay(attempt: attempt)
        print("Retrying in \(backoff) seconds")
        completion(.retryWithDelay(backoff))
    }
}
Testing Retry Logic
Create unit tests to verify your retry behavior:
import XCTest
import Alamofire

final class RetryPolicyTests: XCTestCase {
    /// Verifies that a request against an always-500 endpoint is retried
    /// up to the configured limit before finally failing.
    ///
    /// NOTE: this hits a live endpoint; for reliable CI, stub the transport
    /// with a custom `URLProtocol` registered on the session's
    /// `URLSessionConfiguration` instead of calling the real network.
    func testRetryPolicy() {
        let expectation = XCTestExpectation(description: "Request should exhaust its retries")
        let retryPolicy = CustomRetryPolicy(retryLimit: 3)
        // `RetryPolicy` conforms to `RequestInterceptor`, so pass it directly
        // (there is no `Interceptor(retryPolicy:)` initializer).
        let session = Session(interceptor: retryPolicy)

        // `.validate()` is required: without it an HTTP 500 completes
        // "successfully" and the retry policy is never consulted.
        let request = session.request("https://httpbin.org/status/500")
            .validate()

        request.responseData { response in
            // The full retry budget should have been spent...
            XCTAssertEqual(request.retryCount, 3)
            // ...and the final result should still be a failure.
            XCTAssertNotNil(response.error)
            expectation.fulfill()
        }

        wait(for: [expectation], timeout: 30.0)
    }
}
Integration with Error Handling
When building robust applications, retry logic should work seamlessly with comprehensive error handling strategies and timeout management. This ensures your application can gracefully recover from various failure scenarios while providing users with meaningful feedback.
Conclusion
Implementing retry logic in Alamofire requires careful consideration of your application's requirements, network conditions, and user experience. Start with Alamofire's built-in retry policies for simple cases, then implement custom solutions for more complex scenarios. Always include exponential backoff, proper logging, and circuit breaker patterns for production applications.
Remember to test your retry logic thoroughly and monitor its behavior in production to ensure it provides the resilience your application needs without overwhelming backend services or degrading user experience.