How do I send multiple files in a single request using Requests?

Sending Multiple Files with Python Requests

To send multiple files in a single HTTP request using Python's requests library, you need to create a multipart/form-data request. This is the standard method for file uploads in web forms.

Basic Method

The key is to construct a files dictionary where:

- Keys: form field names expected by the server
- Values: tuples containing (filename, file_object, content_type)

import requests

url = 'https://httpbin.org/post'

# Method 1: Using file paths with context manager
# Both handles are opened read-binary and closed automatically when the
# `with` block exits — even if the request raises.
with open('document.pdf', 'rb') as pdf_file, open('image.jpg', 'rb') as img_file:
    # Each value is a (filename, file_object, content_type) tuple; the
    # filename sent to the server need not match the local path.
    files = {
        'document': ('report.pdf', pdf_file, 'application/pdf'),
        'thumbnail': ('preview.jpg', img_file, 'image/jpeg'),
    }

    result = requests.post(url, files=files)
    print(f"Status: {result.status_code}")
    print(f"Response: {result.json()}")

Alternative Syntax Options

Simplified Format

# Method 2: Simplified tuple format (filename, file_object)
# The content type is omitted here; requests falls back to a default.
with open('file1.txt', 'rb') as f1, open('file2.txt', 'rb') as f2:
    payload = {
        'file1': ('data1.txt', f1),
        'file2': ('data2.txt', f2),
    }
    response = requests.post(url, files=payload)

Using File Objects Directly

# Method 3: Just file objects (uses original filename)
# Passing the open handle directly lets requests pick the filename from
# the handle itself instead of an explicit tuple.
with open('config.json', 'rb') as config, open('data.csv', 'rb') as data:
    payload = {'config': config, 'dataset': data}
    response = requests.post(url, files=payload)

Advanced Examples

Multiple Files of Same Type

# Uploading multiple images with the same form field name.
# A list of (field_name, file_tuple) pairs allows a repeated field name,
# which a dict cannot express.
image_files = ['photo1.jpg', 'photo2.jpg', 'photo3.jpg']

import contextlib

# ExitStack closes every opened handle even if requests.post raises.
# The original open()-now-close-later pattern leaked all the handles
# whenever an exception occurred before the closing loop ran.
with contextlib.ExitStack() as stack:
    files = [
        ('images', (f'image_{i+1}.jpg',
                    stack.enter_context(open(filepath, 'rb')),
                    'image/jpeg'))
        for i, filepath in enumerate(image_files)
    ]

    response = requests.post(url, files=files)

Combining Files with Form Data

# Send files along with regular form data in the same multipart request.
data = {
    'username': 'john_doe',
    'email': 'john@example.com',
    'description': 'Profile update'
}

# Use `with` so both handles are closed even if requests.post raises —
# the original version only closed them after a successful request.
with open('avatar.png', 'rb') as avatar, open('resume.pdf', 'rb') as resume:
    files = {
        'avatar': ('profile.png', avatar, 'image/png'),
        'resume': ('cv.pdf', resume, 'application/pdf')
    }

    # Entries in `data` become ordinary form fields alongside the file parts.
    response = requests.post(url, files=files, data=data)

Using BytesIO for In-Memory Files

from io import BytesIO
import json

# Build the upload payloads entirely in memory — no files on disk needed.
config_stream = BytesIO(json.dumps({'key': 'value'}).encode('utf-8'))
greeting_stream = BytesIO(b'Hello, World!')

# BytesIO objects are accepted anywhere a real file object would be.
files = {
    'config': ('settings.json', config_stream, 'application/json'),
    'message': ('greeting.txt', greeting_stream, 'text/plain')
}

response = requests.post(url, files=files)

Error Handling and Best Practices

Robust Error Handling

import os

import requests
from requests.exceptions import RequestException

def upload_multiple_files(url, file_paths, field_names):
    """Upload several files in one multipart POST with guaranteed cleanup.

    Args:
        url: Endpoint to POST to.
        file_paths: Paths of the files to upload.
        field_names: Form field name for each file, paired positionally
            with ``file_paths`` (``zip`` semantics: extras are ignored).

    Returns:
        The ``requests.Response`` on success, or ``None`` if any error
        was caught and reported.
    """
    # Bind before the try so the finally block can never see an unbound
    # name (the original assigned it inside the try).
    file_objects = []
    try:
        files = {}

        # Open all files up front so a single request carries all of them
        for field_name, file_path in zip(field_names, file_paths):
            file_obj = open(file_path, 'rb')
            file_objects.append(file_obj)
            # os.path.basename is portable; the original split('/')
            # broke on Windows-style backslash paths.
            files[field_name] = (os.path.basename(file_path), file_obj)

        # Make the request
        response = requests.post(url, files=files, timeout=30)
        response.raise_for_status()  # Raises exception for HTTP errors

        return response

    except FileNotFoundError as e:
        print(f"File not found: {e}")
    except RequestException as e:
        print(f"Request failed: {e}")
    except Exception as e:
        print(f"Unexpected error: {e}")
    finally:
        # Always close file objects, on success and failure alike
        for file_obj in file_objects:
            if not file_obj.closed:
                file_obj.close()

# Usage
# Returns the Response on success, or None if an error was caught and
# printed (missing file, HTTP error, or network failure).
response = upload_multiple_files(
    'https://httpbin.org/post',
    ['doc1.pdf', 'doc2.pdf'],
    ['document1', 'document2']
)

Using Context Managers (Recommended)

def upload_files_safely(url, file_configs):
    """Upload files with cleanup guaranteed by ``contextlib.ExitStack``.

    Args:
        url: Endpoint to POST to.
        file_configs: List of tuples (field_name, file_path, content_type).

    Returns:
        The ``requests.Response`` on success, or ``None`` on any failure.
    """
    import os

    files = {}

    try:
        # Open each file *inside* the ExitStack so that a failure while
        # opening file N still closes files 1..N-1.  The original code
        # opened everything in a list comprehension beforehand, leaking
        # the already-open handles if a later open() raised.
        with contextlib.ExitStack() as stack:
            for field_name, file_path, content_type in file_configs:
                file_obj = stack.enter_context(open(file_path, 'rb'))
                # basename is portable, unlike splitting on '/'.
                filename = os.path.basename(file_path)
                files[field_name] = (filename, file_obj, content_type)

            response = requests.post(url, files=files)
            return response

    except Exception as e:
        print(f"Upload failed: {e}")
        return None

# Usage
# NOTE: contextlib only needs to be imported before upload_files_safely
# is *called* (the name is resolved at call time), but placing imports at
# the top of the script is the conventional style.
import contextlib

file_configs = [
    ('document', 'report.pdf', 'application/pdf'),
    ('image', 'chart.png', 'image/png'),
    ('data', 'results.csv', 'text/csv')
]

response = upload_files_safely('https://httpbin.org/post', file_configs)

Common Issues and Solutions

Server Field Name Matching

# Always check API documentation for expected field names.
# Example: the server expects 'file[]' repeated for multiple files, so a
# list of (field, file_tuple) pairs is used instead of a dict.
# `with` guarantees both handles are closed even if the POST raises —
# the original snippet never closed them at all.
with open('doc1.pdf', 'rb') as doc1, open('doc2.pdf', 'rb') as doc2:
    files = [
        ('file[]', ('doc1.pdf', doc1, 'application/pdf')),
        ('file[]', ('doc2.pdf', doc2, 'application/pdf'))
    ]

    response = requests.post(url, files=files)

Large File Handling

# NOTE: stream=True defers downloading the *response* body; it does not
# stream the upload itself.  For true chunked uploads of very large
# files, see requests-toolbelt's MultipartEncoder.
def upload_large_files(url, file_paths):
    """POST several files, closing every handle even when the request fails.

    Args:
        url: Endpoint to POST to.
        file_paths: Paths of the files to upload.

    Returns:
        The ``requests.Response``; because of ``stream=True`` its body
        has not been downloaded yet — the caller must consume or close it.
    """
    import os

    files = {}
    try:
        for i, file_path in enumerate(file_paths):
            files[f'file_{i}'] = (
                os.path.basename(file_path),  # portable, unlike split('/')
                open(file_path, 'rb'),
                'application/octet-stream'
            )

        # stream=True defers the response-body download only.
        response = requests.post(url, files=files, stream=True)
        return response
    finally:
        # Close handles whether the request succeeded or raised — the
        # original only closed them on the success path.
        for field, (_, file_obj, _) in files.items():
            file_obj.close()

This approach gives you complete control over multipart file uploads with the Python requests library, handling both simple and complex scenarios while maintaining proper resource management.

Related Questions

Get Started Now

WebScraping.AI provides rotating proxies, Chromium rendering and built-in HTML parser for web scraping
Icon