Table of contents

How do I extract data from PDF files using JavaScript?

Extracting data from PDF files in JavaScript can be accomplished through several approaches, each suited for different environments and use cases. Whether you're working in the browser, Node.js, or need to handle complex PDF structures, this guide covers the most effective methods and libraries available.

Overview of PDF Data Extraction Methods

PDF data extraction in JavaScript can be categorized into three main approaches:

  1. Client-side extraction using PDF.js in browsers
  2. Server-side extraction using Node.js libraries like pdf-parse
  3. Headless browser extraction using Puppeteer for complex PDFs

Method 1: Using PDF.js for Browser-Based Extraction

PDF.js is Mozilla's JavaScript library for rendering PDF files in browsers. It's excellent for client-side PDF data extraction without requiring server-side processing.

Installing PDF.js

npm install pdfjs-dist

Basic Text Extraction with PDF.js

import * as pdfjsLib from 'pdfjs-dist';

// Set up PDF.js worker
// NOTE(review): the CDN worker version (3.11.174) must match the installed
// pdfjs-dist version, or page parsing can fail at runtime — verify on upgrade.
pdfjsLib.GlobalWorkerOptions.workerSrc = 'https://cdnjs.cloudflare.com/ajax/libs/pdf.js/3.11.174/pdf.worker.min.js';

/**
 * Extract the plain text of every page in a PDF.
 *
 * @param {string} pdfUrl - URL (or path) of the PDF to load.
 * @returns {Promise<string>} Text of all pages, each prefixed with "Page N:".
 * @throws Propagates any PDF.js loading/parsing error after logging it.
 */
async function extractTextFromPDF(pdfUrl) {
    try {
        const pdf = await pdfjsLib.getDocument(pdfUrl).promise;
        const pages = [];

        // Collect each page's text items, joined with single spaces.
        for (let pageNumber = 1; pageNumber <= pdf.numPages; pageNumber++) {
            const page = await pdf.getPage(pageNumber);
            const { items } = await page.getTextContent();
            const pageText = items.map(({ str }) => str).join(' ');
            pages.push(`Page ${pageNumber}:\n${pageText}\n\n`);
        }

        return pages.join('');
    } catch (error) {
        console.error('Error extracting PDF text:', error);
        throw error;
    }
}

// Usage example
// Usage example
(async () => {
    try {
        console.log(await extractTextFromPDF('path/to/document.pdf'));
    } catch (error) {
        console.error(error);
    }
})();

Extracting Structured Data with PDF.js

/**
 * Extract document metadata plus per-item text geometry for every page.
 *
 * @param {string} pdfUrl - URL (or path) of the PDF to load.
 * @returns {Promise<{title: ?string, author: ?string, subject: ?string,
 *   pages: number, content: Array}>} Metadata fields are null when the PDF
 *   lacks the corresponding Info entry.
 */
async function extractStructuredData(pdfUrl) {
    const pdf = await pdfjsLib.getDocument(pdfUrl).promise;
    const metadata = await pdf.getMetadata();

    const result = {
        // Guard with ?. — PDFs without an Info dictionary would otherwise
        // throw a TypeError here; absent fields become null.
        title: metadata.info?.Title ?? null,
        author: metadata.info?.Author ?? null,
        subject: metadata.info?.Subject ?? null,
        pages: pdf.numPages,
        content: []
    };

    for (let pageNum = 1; pageNum <= pdf.numPages; pageNum++) {
        const page = await pdf.getPage(pageNum);
        const textContent = await page.getTextContent();

        const pageData = {
            pageNumber: pageNum,
            text: textContent.items.map(item => ({
                content: item.str,
                // transform[4]/[5] are the x/y translation components of the
                // text item's transform matrix in PDF.js text content items.
                x: item.transform[4],
                y: item.transform[5],
                width: item.width,
                height: item.height
            }))
        };

        result.content.push(pageData);
    }

    return result;
}

Method 2: Server-Side Extraction with pdf-parse

For Node.js environments, pdf-parse provides a lightweight solution for PDF text extraction.

Installing pdf-parse

npm install pdf-parse

Basic Implementation

// fs reads the PDF from disk; pdf-parse extracts its text and metadata.
const fs = require('fs');
const pdf = require('pdf-parse');

/**
 * Parse a PDF file from disk and return its text plus document metadata.
 *
 * @param {string} filePath - Path to the PDF file.
 * @returns {Promise<{pages: number, text: string, info: Object,
 *   metadata: Object, version: string}>}
 * @throws Propagates any read/parse error after logging it.
 */
async function extractPDFData(filePath) {
    try {
        // Non-blocking read: readFileSync would stall the event loop for the
        // whole file, which matters in servers handling concurrent requests.
        const dataBuffer = await fs.promises.readFile(filePath);
        const data = await pdf(dataBuffer);

        return {
            pages: data.numpages,
            text: data.text,
            info: data.info,
            metadata: data.metadata,
            version: data.version
        };
    } catch (error) {
        console.error('Error parsing PDF:', error);
        throw error;
    }
}

// Usage
// Usage
(async () => {
    try {
        const result = await extractPDFData('./document.pdf');
        console.log('Number of pages:', result.pages);
        console.log('Content:', result.text);
    } catch (error) {
        console.error(error);
    }
})();

Advanced pdf-parse Configuration

// Options accepted by pdf-parse: cap the page count and plug in a
// custom per-page renderer that normalizes whitespace.
const pdfOptions = {
    // Maximum number of pages to parse
    max: 10,

    // Custom page rendering function: join the page's text items, then
    // collapse whitespace runs into single spaces and trim the ends.
    pagerender: (pageData) =>
        pageData.getTextContent().then((textContent) => {
            const joined = textContent.items.map((item) => item.str).join(' ');
            return joined.replace(/\s+/g, ' ').trim();
        })
};

// Run pdf-parse over a local file using the shared pdfOptions above.
async function extractWithOptions(filePath) {
    const buffer = fs.readFileSync(filePath);
    return pdf(buffer, pdfOptions);
}

Method 3: Using Puppeteer for Complex PDFs

When dealing with complex PDFs or needing to extract data from PDF viewers, Puppeteer provides powerful automation capabilities for PDF data extraction.

Installing Puppeteer

npm install puppeteer

PDF Text Extraction with Puppeteer

// Puppeteer drives a headless Chromium instance for rendering-based extraction.
const puppeteer = require('puppeteer');

/**
 * Render a PDF in headless Chromium and scrape the text layer of each page.
 *
 * @param {string} pdfUrl - URL (or file path) the browser should open.
 * @returns {Promise<Array<{page: number, text: string}>>} One entry per
 *   rendered page, in document order.
 */
async function extractPDFWithPuppeteer(pdfUrl) {
    const browser = await puppeteer.launch({
        headless: true,
        args: ['--no-sandbox', '--disable-setuid-sandbox']
    });

    try {
        const tab = await browser.newPage();

        // Load the document and wait for network activity to settle.
        await tab.goto(pdfUrl, { waitUntil: 'networkidle0' });

        // The viewer marks each rendered page with data-page-number.
        await tab.waitForSelector('div[data-page-number]', { timeout: 10000 });

        // Pull the text layer of every rendered page in the browser context.
        return await tab.evaluate(() => {
            const textLayers = document.querySelectorAll('.textLayer');
            return Array.from(textLayers).map((layer, index) => ({
                page: index + 1,
                text: layer.innerText || layer.textContent
            }));
        });
    } finally {
        // Always release the browser, even on timeout or navigation failure.
        await browser.close();
    }
}

// Usage
// Usage
(async () => {
    try {
        console.log(await extractPDFWithPuppeteer('https://example.com/document.pdf'));
    } catch (error) {
        console.error(error);
    }
})();

Handling PDF Forms with Puppeteer

/**
 * Open a PDF (or PDF viewer page) in headless Chromium and collect the
 * values of any form fields exposed as DOM inputs.
 *
 * @param {string} pdfUrl - URL the browser should open.
 * @returns {Promise<Object>} Map of field name (or id) to
 *   {type, value, placeholder}.
 */
async function extractPDFFormData(pdfUrl) {
    const browser = await puppeteer.launch();

    try {
        const page = await browser.newPage();

        await page.goto(pdfUrl);

        // Wait for form elements to load.
        // NOTE(review): page.waitForTimeout is removed in recent Puppeteer
        // releases — prefer waiting on a selector for the form fields.
        await page.waitForTimeout(2000);

        // Extract form field data keyed by name, falling back to id.
        return await page.evaluate(() => {
            const inputs = document.querySelectorAll('input, select, textarea');
            const fields = {};

            inputs.forEach(input => {
                if (input.name || input.id) {
                    fields[input.name || input.id] = {
                        type: input.type,
                        value: input.value,
                        placeholder: input.placeholder
                    };
                }
            });

            return fields;
        });
    } finally {
        // Original leaked the browser process whenever goto/evaluate threw;
        // finally guarantees shutdown on every path.
        await browser.close();
    }
}

Method 4: Handling PDF Downloads and Processing

When PDFs need to be downloaded before processing, combine file download handling with extraction methods.

// axios streams the download; fs persists it; pdf-parse extracts the text.
const axios = require('axios');
const fs = require('fs');
const pdf = require('pdf-parse');

/**
 * Download a PDF to a temporary path, parse it, and remove the file.
 *
 * @param {string} pdfUrl - Remote PDF location.
 * @param {string} outputPath - Temporary file path for the download.
 * @returns {Promise<Object>} The pdf-parse result object.
 * @throws Propagates any download/parse error after logging it.
 */
async function downloadAndExtractPDF(pdfUrl, outputPath) {
    try {
        // Download PDF as a stream to avoid buffering the body in memory twice.
        const response = await axios({
            method: 'GET',
            url: pdfUrl,
            responseType: 'stream'
        });

        // Save to file
        const writer = fs.createWriteStream(outputPath);
        response.data.pipe(writer);

        await new Promise((resolve, reject) => {
            writer.on('finish', resolve);
            writer.on('error', reject);
        });

        try {
            // Extract data
            const dataBuffer = fs.readFileSync(outputPath);
            return await pdf(dataBuffer);
        } finally {
            // Original only deleted the temp file on success, leaking it when
            // pdf() threw; finally removes it on every path after the write.
            fs.unlinkSync(outputPath);
        }
    } catch (error) {
        console.error('Error downloading/extracting PDF:', error);
        throw error;
    }
}

Advanced Data Processing Techniques

Regular Expression Patterns for Data Extraction

/**
 * Pull common field types (emails, phone numbers, dates, dollar amounts)
 * out of raw PDF text with regular expressions.
 *
 * @param {string} pdfText - Text previously extracted from a PDF.
 * @returns {{emails: string[], phones: string[], dates: string[],
 *   amounts: string[]}} Matches per category; empty arrays when none found.
 */
function extractSpecificData(pdfText) {
    const patterns = {
        emails: /[\w\.-]+@[\w\.-]+\.\w+/g,
        phones: /(\+?\d{1,3}[-.\s]?)?\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}/g,
        dates: /\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/g,
        amounts: /\$\d{1,3}(?:,\d{3})*(?:\.\d{2})?/g
    };

    return Object.fromEntries(
        Object.entries(patterns).map(([category, pattern]) => [
            category,
            pdfText.match(pattern) || []
        ])
    );
}

// Usage with pdf-parse
// Usage with pdf-parse — note the .catch: the original chain had no
// rejection handler, so a parse failure became an unhandled rejection.
extractPDFData('./invoice.pdf')
    .then(result => {
        const specificData = extractSpecificData(result.text);
        console.log('Extracted data:', specificData);
    })
    .catch(error => console.error('Extraction failed:', error));

Table Data Extraction

/**
 * Heuristically group table-like rows out of extracted PDF text.
 * A line counts as a table row when it contains tabs or runs of two or
 * more spaces; consecutive such rows form one table.
 *
 * @param {string} pdfText - Text previously extracted from a PDF.
 * @returns {string[][][]} Array of tables; each table is an array of rows,
 *   each row an array of column strings.
 */
function extractTableData(pdfText) {
    // Split into trimmed, non-empty lines.
    const lines = pdfText
        .split('\n')
        .map((line) => line.trim())
        .filter((line) => line);

    const tables = [];
    let rows = [];

    // Close out the in-progress table, if any.
    const flush = () => {
        if (rows.length > 0) {
            tables.push(rows);
            rows = [];
        }
    };

    for (const line of lines) {
        // Detect table rows (adjust pattern based on your PDF structure).
        if (line.includes('\t') || /\s{2,}/.test(line)) {
            // Split by multiple spaces or tabs.
            const columns = line.split(/\s{2,}|\t/).filter((col) => col.trim());
            if (columns.length > 1) {
                rows.push(columns);
            }
        } else {
            // A non-tabular line terminates the current table.
            flush();
        }
    }

    // Capture a table that runs to the end of the text.
    flush();

    return tables;
}

Error Handling and Best Practices

Robust Error Handling

/**
 * PDF extraction helper with retry support.
 *
 * Retries a chosen extraction backend up to `retries` times with
 * exponential backoff between attempts, rethrowing the last error when
 * every attempt fails.
 */
class PDFExtractor {
    constructor(options = {}) {
        this.timeout = options.timeout || 30000; // reserved for backends that accept a timeout
        this.retries = options.retries || 3;
    }

    /**
     * Attempt extraction with the named method, retrying on failure.
     *
     * @param {string} pdfSource - File path or URL for the backend.
     * @param {string} method - 'pdf-parse' | 'pdfjs' | 'puppeteer'.
     * @returns {Promise<*>} The backend's extraction result.
     * @throws The last error once all attempts are exhausted.
     */
    async extractWithRetry(pdfSource, method = 'pdf-parse') {
        let lastError;

        for (let attempt = 1; attempt <= this.retries; attempt++) {
            try {
                switch (method) {
                    case 'pdf-parse':
                        return await this.extractWithPdfParse(pdfSource);
                    case 'pdfjs':
                        return await this.extractWithPdfJs(pdfSource);
                    case 'puppeteer':
                        return await this.extractWithPuppeteer(pdfSource);
                    default:
                        throw new Error(`Unknown extraction method: ${method}`);
                }
            } catch (error) {
                lastError = error;
                console.warn(`Attempt ${attempt} failed:`, error.message);

                if (attempt < this.retries) {
                    // True exponential backoff (1s, 2s, 4s, ...). The original
                    // comment claimed "exponential" but 1000 * attempt is linear.
                    await this.delay(1000 * 2 ** (attempt - 1));
                }
            }
        }

        throw lastError;
    }

    // Promise-based sleep used between retry attempts.
    async delay(ms) {
        return new Promise(resolve => setTimeout(resolve, ms));
    }

    // Backend: parse a local file with pdf-parse.
    async extractWithPdfParse(filePath) {
        const dataBuffer = fs.readFileSync(filePath);
        return await pdf(dataBuffer);
    }

    // Add other extraction methods...
}

// Usage
// Usage: three attempts with backoff before giving up.
const extractor = new PDFExtractor({ retries: 3, timeout: 30000 });

(async () => {
    try {
        const result = await extractor.extractWithRetry('./document.pdf', 'pdf-parse');
        console.log('Success:', result);
    } catch (error) {
        console.error('All attempts failed:', error);
    }
})();

Performance Optimization

Memory-Efficient Processing for Large PDFs

/**
 * Stream a large PDF through a per-page callback in fixed-size batches,
 * limiting how many pages are in flight at once.
 *
 * @param {string} filePath - Path or URL passed to pdfjsLib.getDocument.
 * @param {Function} pageCallback - (textContent, pageNumber) => result;
 *   invoked once per page, results collected in page order.
 * @returns {Promise<Array>} pageCallback results for all pages.
 */
async function processLargePDF(filePath, pageCallback) {
    const pdf = await pdfjsLib.getDocument(filePath).promise;
    const results = [];

    // Five pages in flight at a time keeps peak memory bounded.
    const batchSize = 5;

    for (let start = 1; start <= pdf.numPages; start += batchSize) {
        const last = Math.min(start + batchSize - 1, pdf.numPages);
        const inFlight = [];

        for (let pageNum = start; pageNum <= last; pageNum++) {
            const task = pdf
                .getPage(pageNum)
                .then((page) => page.getTextContent())
                .then((content) => pageCallback(content, pageNum));
            inFlight.push(task);
        }

        results.push(...(await Promise.all(inFlight)));

        // Give V8 a chance to reclaim page data between batches
        // (only effective when node runs with --expose-gc).
        if (global.gc) global.gc();
    }

    return results;
}

Conclusion

JavaScript offers multiple robust approaches for PDF data extraction, each with distinct advantages:

  • PDF.js excels in browser environments and provides detailed control over text positioning
  • pdf-parse offers simplicity and efficiency for server-side Node.js applications
  • Puppeteer handles complex PDFs and interactive forms through browser automation

Choose the method that best fits your environment, performance requirements, and PDF complexity. For production applications, implement proper error handling, retry logic, and consider memory management when processing large documents.

When working with dynamic content or complex PDF interactions, Puppeteer's browser automation capabilities provide the most comprehensive solution for data extraction tasks.

Try WebScraping.AI for Your Web Scraping Needs

Looking for a powerful web scraping solution? WebScraping.AI provides an LLM-powered API that combines Chromium JavaScript rendering with rotating proxies for reliable data extraction.

Key Features:

  • AI-powered extraction: Ask questions about web pages or extract structured data fields
  • JavaScript rendering: Full Chromium browser support for dynamic content
  • Rotating proxies: Datacenter and residential proxies from multiple countries
  • Easy integration: Simple REST API with SDKs for Python, Ruby, PHP, and more
  • Reliable & scalable: Built for developers who need consistent results

Getting Started:

Get page content with AI analysis:

curl "https://api.webscraping.ai/ai/question?url=https://example.com&question=What is the main topic?&api_key=YOUR_API_KEY"

Extract structured data:

curl "https://api.webscraping.ai/ai/fields?url=https://example.com&fields[title]=Page title&fields[price]=Product price&api_key=YOUR_API_KEY"

Try in request builder

Related Questions

Get Started Now

WebScraping.AI provides rotating proxies, Chromium rendering and built-in HTML parser for web scraping
Icon