---
dataset_info:
  - config_name: cartoons
    features:
      - name: filepath
        dtype: string
      - name: pub_date
        dtype: string
      - name: page_seq_num
        dtype: int32
      - name: edition_seq_num
        dtype: int64
      - name: batch
        dtype: string
      - name: lccn
        dtype: string
      - name: box
        sequence: decimal128(21, 20)
      - name: score
        dtype: float64
      - name: ocr
        sequence: string
      - name: place_of_publication
        dtype: string
      - name: geographic_coverage
        sequence: string
      - name: name
        dtype: string
      - name: publisher
        dtype: string
      - name: url
        dtype: string
      - name: page_url
        dtype: string
      - name: prediction_section_iiif_url
        dtype: string
      - name: iiif_full_url
        dtype: string
      - name: predicted_type
        dtype: string
    splits:
      - name: train
        num_bytes: 238959228
        num_examples: 206054
    download_size: 71381230
    dataset_size: 238959228
  - config_name: comics
    features:
      - name: filepath
        dtype: string
      - name: pub_date
        dtype: string
      - name: page_seq_num
        dtype: int32
      - name: edition_seq_num
        dtype: int64
      - name: batch
        dtype: string
      - name: lccn
        dtype: string
      - name: box
        sequence: decimal128(23, 22)
      - name: score
        dtype: float64
      - name: ocr
        sequence: string
      - name: place_of_publication
        dtype: string
      - name: geographic_coverage
        sequence: string
      - name: name
        dtype: string
      - name: publisher
        dtype: string
      - name: url
        dtype: string
      - name: page_url
        dtype: string
      - name: prediction_section_iiif_url
        dtype: string
      - name: iiif_full_url
        dtype: string
      - name: predicted_type
        dtype: string
    splits:
      - name: train
        num_bytes: 887230943
        num_examples: 526319
    download_size: 352764547
    dataset_size: 887230943
  - config_name: default
    features:
      - name: pub_date
        dtype: string
      - name: prediction_section_iiif_url
        dtype: string
      - name: score
        dtype: float64
      - name: ocr
        sequence: string
      - name: place_of_publication
        dtype: string
      - name: geographic_coverage
        sequence: string
      - name: name
        dtype: string
      - name: publisher
        dtype: string
      - name: url
        dtype: string
      - name: page_url
        dtype: string
      - name: iiif_full_url
        dtype: string
      - name: predicted_type
        dtype: string
      - name: filepath
        dtype: string
      - name: page_seq_num
        dtype: int32
      - name: edition_seq_num
        dtype: int64
      - name: batch
        dtype: string
      - name: lccn
        dtype: string
      - name: box
        sequence: float64
    splits:
      - name: train
        num_bytes: 4315793816
        num_examples: 3274561
    download_size: 1404652833
    dataset_size: 4315793816
  - config_name: illustrations
    features:
      - name: filepath
        dtype: string
      - name: pub_date
        dtype: string
      - name: page_seq_num
        dtype: int32
      - name: edition_seq_num
        dtype: int64
      - name: batch
        dtype: string
      - name: lccn
        dtype: string
      - name: box
        sequence: decimal128(22, 21)
      - name: score
        dtype: float64
      - name: ocr
        sequence: string
      - name: place_of_publication
        dtype: string
      - name: geographic_coverage
        sequence: string
      - name: name
        dtype: string
      - name: publisher
        dtype: string
      - name: url
        dtype: string
      - name: page_url
        dtype: string
      - name: prediction_section_iiif_url
        dtype: string
      - name: iiif_full_url
        dtype: string
      - name: predicted_type
        dtype: string
    splits:
      - name: train
        num_bytes: 942457956
        num_examples: 798475
    download_size: 258125239
    dataset_size: 942457956
  - config_name: maps
    features:
      - name: filepath
        dtype: string
      - name: pub_date
        dtype: string
      - name: page_seq_num
        dtype: int32
      - name: edition_seq_num
        dtype: int64
      - name: batch
        dtype: string
      - name: lccn
        dtype: string
      - name: box
        sequence: decimal128(22, 21)
      - name: score
        dtype: float64
      - name: ocr
        sequence: string
      - name: place_of_publication
        dtype: string
      - name: geographic_coverage
        sequence: string
      - name: name
        dtype: string
      - name: publisher
        dtype: string
      - name: url
        dtype: string
      - name: page_url
        dtype: string
      - name: prediction_section_iiif_url
        dtype: string
      - name: iiif_full_url
        dtype: string
      - name: predicted_type
        dtype: string
    splits:
      - name: train
        num_bytes: 382556331
        num_examples: 200188
    download_size: 150128611
    dataset_size: 382556331
  - config_name: photos
    features:
      - name: filepath
        dtype: string
      - name: pub_date
        dtype: string
      - name: page_seq_num
        dtype: int32
      - name: edition_seq_num
        dtype: int64
      - name: batch
        dtype: string
      - name: lccn
        dtype: string
      - name: box
        sequence: decimal128(22, 21)
      - name: score
        dtype: float64
      - name: ocr
        sequence: string
      - name: place_of_publication
        dtype: string
      - name: geographic_coverage
        sequence: string
      - name: name
        dtype: string
      - name: publisher
        dtype: string
      - name: url
        dtype: string
      - name: page_url
        dtype: string
      - name: prediction_section_iiif_url
        dtype: string
      - name: iiif_full_url
        dtype: string
      - name: predicted_type
        dtype: string
    splits:
      - name: train
        num_bytes: 1971669677
        num_examples: 1543525
    download_size: 596762379
    dataset_size: 1971669677
configs:
  - config_name: cartoons
    data_files:
      - split: train
        path: cartoons/train-*
  - config_name: comics
    data_files:
      - split: train
        path: comics/train-*
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
  - config_name: illustrations
    data_files:
      - split: train
        path: illustrations/train-*
  - config_name: maps
    data_files:
      - split: train
        path: maps/train-*
  - config_name: photos
    data_files:
      - split: train
        path: photos/train-*
task_categories:
  - image-classification
  - object-detection
  - image-feature-extraction
tags:
  - glam
  - lam
  - newspapers
  - history
pretty_name: Newspaper Navigator
license: cc0-1.0
language:
  - en
size_categories:
  - 1M<n<10M
---

# Dataset Card for Newspaper Navigator

## Dataset Summary

This dataset provides a Parquet-converted version of the Newspaper Navigator dataset from the Library of Congress. Originally released as JSON, Newspaper Navigator contains over 16 million pages of historic US newspapers annotated with bounding boxes, predicted visual types (e.g., photographs, maps), and OCR content. This work was carried out as part of a project by Benjamin Charles Germain Lee et al.

This version of the dataset was created using nnanno, a tool for sampling, annotating, and running inference on the Newspaper Navigator dataset. The dataset has been split into separate configurations by predicted_type (e.g., photographs, illustrations, maps) to enable easy downloading of specific content types.

All records include metadata such as publication date, location, LCCN, bounding boxes, OCR text, IIIF image links, and source URLs.

Currently this version is missing the ads and headlines configurations. I am working on adding these to the dataset!

## Loading the Dataset with HuggingFace Datasets

```python
from datasets import load_dataset

# Load a specific configuration (e.g., photos)
photos_dataset = load_dataset("biglam/newspaper-navigator", "photos")

# View the first example
example = photos_dataset["train"][0]
print(f"Publication: {example['name']}")
print(f"Date: {example['pub_date']}")
print(f"OCR text: {example['ocr']}")

# Load multiple configurations
configs = ["photos", "cartoons", "maps"]
datasets = {config: load_dataset("biglam/newspaper-navigator", config) for config in configs}

# Filter by date range (e.g., World War II era)
ww2_photos = photos_dataset["train"].filter(
    lambda x: "1939" <= x["pub_date"][:4] <= "1945"
)
```
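
If you only want to inspect a handful of records, the full download can be skipped by streaming. A minimal sketch using the `datasets` streaming mode (field names follow the feature list below):

```python
from datasets import load_dataset

# Stream the photos configuration instead of downloading it in full
photos_stream = load_dataset("biglam/newspaper-navigator", "photos", streaming=True)

# Peek at the first few records
for i, record in enumerate(photos_stream["train"]):
    print(record["pub_date"], record["name"], record["score"])
    if i >= 4:
        break
```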

## Dataset Structure

The dataset currently has the following configurations:

- `cartoons`
- `comics`
- `default`
- `illustrations`
- `maps`
- `photos`

Each configuration corresponds to one `predicted_type` of visual content extracted by the original object detection model. Within each configuration, each row represents a single predicted bounding box on a newspaper page.

Each row includes the following features:

- `filepath`: File path of the page image
- `pub_date`: Publication date (YYYY-MM-DD)
- `page_seq_num`: Sequence number of the page within the edition
- `edition_seq_num`: Sequence number of the edition
- `batch`: Chronicling America batch identifier
- `lccn`: Library of Congress Control Number
- `box`: Bounding box coordinates [x, y, width, height]
- `score`: Prediction confidence score
- `ocr`: OCR text extracted from the bounding box
- `place_of_publication`: Place of publication
- `geographic_coverage`: Geographic areas covered
- `name`: Newspaper title
- `publisher`: Publisher name
- `url`: URL to the original JP2 image
- `page_url`: URL to the Chronicling America page
- `prediction_section_iiif_url`: IIIF URL targeting the visual region
- `iiif_full_url`: IIIF URL for the full page image
- `predicted_type`: Predicted visual type (e.g., photograph, map)

### Example

```json
{
  "filepath": "data/sn83045462/00280606406/1950081401/0426/006_0_90.jpg",
  "pub_date": "1950-08-14",
  "page_seq_num": 6,
  "edition_seq_num": 1,
  "batch": "dlc_2kandinsky_ver01",
  "lccn": "sn83045462",
  "box": [29.91, 62.29, 13.14, 10.85],
  "score": 0.98,
  "ocr": ["THE PRESIDENT SPEAKS..."],
  "place_of_publication": "Washington, D.C.",
  "geographic_coverage": ["District of Columbia"],
  "name": "Evening Star",
  "publisher": "Evening Star Newspaper Co.",
  "url": "https://chroniclingamerica.loc.gov/data/batches/dlc_2kandinsky_ver01/data/sn83045462/00280606406/1950081401/0426.jp2",
  "page_url": "https://chroniclingamerica.loc.gov/lccn/sn83045462/1950-08-14/ed-1/seq-6/",
  "prediction_section_iiif_url": "https://chroniclingamerica.loc.gov/iiif/2/vtu_londonderry_ver01/data/sn84023252/00200296205/1850051101/0001.jp2/pct:29.91,62.29,13.14,10.85/pct:100/0/default.jpg",
  "iiif_full_url": "https://chroniclingamerica.loc.gov/iiif/2/vtu_londonderry_ver01/data/sn84023252/00200296205/1850051101/0001.jp2/full/full/0/default.jpg",
  "predicted_type": "photograph"
}
```
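
As a quick illustration of how these fields fit together, the sketch below downloads the cropped region for a record via its `prediction_section_iiif_url`. This is a minimal example, assuming the `requests` and `Pillow` packages are installed; see the IIIF notes and the Library of Congress terms of use below before issuing many requests.

```python
import io

import requests
from PIL import Image


def fetch_region(record, timeout=30):
    """Download the predicted visual region for one record as a PIL image."""
    response = requests.get(record["prediction_section_iiif_url"], timeout=timeout)
    response.raise_for_status()
    return Image.open(io.BytesIO(response.content))


# Example usage with a row loaded as shown above:
# crop = fetch_region(photos_dataset["train"][0])
# crop.save("crop.jpg")
```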

## Potential Applications

This dataset opens up numerous research and application opportunities across multiple domains:

### Historical Research

- Track visual depictions of events, people, or places across time
- Analyze evolving graphic design and visual communication techniques in newspapers
- Study regional differences in visual reporting styles across the United States

### Machine Learning

- Fine-tune computer vision models on historical document imagery
- Develop OCR improvements for historical printed materials
- Create multimodal models that combine visual elements with surrounding text

### Digital Humanities

- Analyze how visual representations of specific groups or topics evolved over time
- Map geographic coverage of visual news content across different regions
- Study the emergence and evolution of political cartoons, comics, and photojournalism

### Educational Applications

- Create interactive timelines of historical events using primary source imagery
- Develop educational tools for studying visual culture and media literacy
- Build classroom resources using authentic historical newspaper visuals

## IIIF tl;dr

This dataset includes IIIF image URLs, an open standard used by libraries and archives to serve high-resolution images. These URLs allow precise access to a full page or a specific region (e.g. a bounding box around a photo or map) without downloading the entire file, which makes them ideal for pipelines that need on-demand loading of visual content.


### How a IIIF Image URL Works

A IIIF image URL has a standard structure:

```
{scheme}://{server}/{prefix}/{identifier}/{region}/{size}/{rotation}/{quality}.{format}
```

This lets you retrieve just a portion of an image at a given size or resolution.
For example, the region and size parts can be used to extract a cropped 300×300 window from a 10,000×10,000 scan.
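
As a concrete sketch of that template, the snippet below composes a URL from its parts. The identifier and region values are copied from the example record above; the size value (`300,`, meaning a 300-pixel-wide rendering) is an arbitrary choice for illustration.

```python
# Compose a IIIF Image API URL from the template components.
server = "https://chroniclingamerica.loc.gov"
prefix = "iiif/2"
identifier = "vtu_londonderry_ver01/data/sn84023252/00200296205/1850051101/0001.jp2"
region = "pct:29.91,62.29,13.14,10.85"  # x, y, width, height as page percentages
size = "300,"                           # 300 pixels wide, height scaled to match
rotation = "0"
quality = "default"
fmt = "jpg"

url = f"{server}/{prefix}/{identifier}/{region}/{size}/{rotation}/{quality}.{fmt}"
print(url)
```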



### How IIIF Servers Deliver Cropped Images Efficiently

IIIF servers are designed to deliver image regions at scale:

- Tiled, multi-resolution storage: Images are stored as pyramids (e.g., in JPEG2000 or tiled TIFF).
- Smart region requests: URLs specify just the part of the image you want.
- No full image needed: The server returns only the relevant tiles, optionally re-encoded.
- Highly cacheable: Deterministic URLs make responses easy to cache and scale.

This makes IIIF ideal for loading targeted image crops in ML workflows — without handling large downloads or preprocessing full-page scans.

> ⚠️ **Note:** Please use IIIF image APIs responsibly and in accordance with the current Library of Congress Terms of Use. Consider the number of requests you make, cache responses client-side, etc.
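
One lightweight way to follow that advice is sketched below: a small helper that caches downloaded crops on disk and sleeps between remote requests. The cache directory and delay are arbitrary illustrative choices, not requirements of the API.

```python
import hashlib
import time
from pathlib import Path

import requests

CACHE_DIR = Path("iiif_cache")
CACHE_DIR.mkdir(exist_ok=True)


def fetch_cached(url, delay_seconds=1.0, timeout=30):
    """Fetch a IIIF image URL, reusing a local copy when one already exists."""
    cache_file = CACHE_DIR / (hashlib.sha1(url.encode("utf-8")).hexdigest() + ".jpg")
    if cache_file.exists():
        return cache_file.read_bytes()
    time.sleep(delay_seconds)  # simple throttle between remote requests
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    cache_file.write_bytes(response.content)
    return response.content
```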

## Source

The underlying data comes from the Library of Congress's Newspaper Navigator project, which is built on the Chronicling America newspaper collections.

## Conversion Notes

This version of the dataset was prepared using nnanno.

The conversion process reformats JSON metadata into structured Parquet files for easier querying and integration with modern ML pipelines.
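
Because the data is stored as Parquet, it can also be queried directly from the Hub without going through `datasets`. A rough sketch using DuckDB (this assumes a recent DuckDB build with `hf://` support via the httpfs extension, and that the shard files follow the `photos/train-*` Parquet layout listed in the configs above; adjust the glob if needed):

```python
import duckdb

# Count detections per newspaper title straight from the hosted Parquet shards.
result = duckdb.sql(
    """
    SELECT name, COUNT(*) AS n_detections
    FROM read_parquet('hf://datasets/biglam/newspaper-navigator/photos/train-*.parquet')
    GROUP BY name
    ORDER BY n_detections DESC
    LIMIT 10
    """
)
print(result)
```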

## Ethical Considerations

Working with historical newspaper content requires careful consideration of several ethical dimensions:

### Historical Context and Bias

- Historical newspapers reflect the biases and prejudices of their time periods
- Content may include offensive, racist, or discriminatory language and imagery
- Researchers should contextualize findings within the historical social and political climate

### Responsible Use Guidelines

- Avoid decontextualizing historical content in ways that could perpetuate stereotypes
- Consider adding appropriate content warnings when sharing potentially offensive historical material
- Acknowledge the partial and biased nature of newspaper coverage from different eras

### Technical Limitations and Representation Bias

- OCR quality varies significantly across the dataset, potentially creating selection bias in text-based analyses
- The machine learning models used to identify visual elements may have their own biases in classification accuracy
- Certain communities and perspectives are underrepresented in historical newspaper archives

### API Usage Ethics

- The IIIF image API should be used responsibly to avoid overwhelming the Library of Congress servers
- Implement caching and throttling in applications that make frequent requests

## Citation

```bibtex
@article{visualcontent2020,
  title={The Newspaper Navigator Dataset: Extracting and Analyzing Visual Content from 16 Million Historic Newspaper Pages in Chronicling America},
  author={Lee, Benjamin Charles Germain and others},
  journal={arXiv preprint arXiv:2005.01583},
  year={2020}
}
```

## Acknowledgments

This work draws on the remarkable digitization and metadata efforts of the Library of Congress’s Chronicling America and the innovative Newspaper Navigator project.

## Dataset Contact

This conversion of the dataset was created and is maintained by @davanstrien.