Dataset Viewer

id (int64: 187-569k) | file_name (string: 4-119 chars) | file_path (string: 14-227 chars) | content (string: 24-9.96M chars) | size (int64: 24-9.96M) | language (1 class) | extension (14 classes) | total_lines (int64: 1-219k) | avg_line_length (float64: 3.48-4.02M) | max_line_length (int64: 5-9.91M) | alphanum_fraction (float64: 0-1) | repo_name (string: 7-101 chars) | repo_stars (int64: 94-139k) | repo_forks (int64: 0-26.4k) | repo_open_issues (int64: 0-2.27k) | repo_license (12 classes) | repo_extraction_date (455 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
187 |
setup.py
|
internetarchive_openlibrary/setup.py
|
# setup.py is only used by solrbuilder to cythonize some files. See
# scripts/solr_builder/build-cython.sh. We might be able to remove
# it entirely if we call cython directly from that script.
from setuptools import find_packages, setup
from Cython.Build import cythonize
setup(
# Used to make solrbuilder faster
packages=find_packages(include=['openlibrary', 'openlibrary.*']),
ext_modules=cythonize(
"openlibrary/solr/update.py", compiler_directives={'language_level': "3"}
),
)
| 505 |
Python
|
.py
| 12 | 39 | 81 | 0.75813 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
188 |
conftest.py
|
internetarchive_openlibrary/openlibrary/conftest.py
|
"""pytest configuration for openlibrary
"""
import pytest
import web
from infogami.infobase.tests.pytest_wildcard import Wildcard
from infogami.utils import template
from infogami.utils.view import render_template as infobase_render_template
from openlibrary.i18n import gettext
from openlibrary.core import helpers
from openlibrary.mocks.mock_infobase import mock_site
from openlibrary.mocks.mock_ia import mock_ia
from openlibrary.mocks.mock_memcache import mock_memcache
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
def mock_request(*args, **kwargs):
raise Warning('Network requests are blocked in the testing environment')
monkeypatch.setattr("requests.sessions.Session.request", mock_request)
@pytest.fixture(autouse=True)
def no_sleep(monkeypatch):
def mock_sleep(*args, **kwargs):
raise Warning(
'''
Sleeping is blocked in the testing environment.
Use monkeytime instead; it stubs time.time() and time.sleep().
Eg:
def test_foo(monkeytime):
assert time.time() == 1
time.sleep(1)
assert time.time() == 2
If you need more methods stubbed, edit monkeytime in openlibrary/conftest.py
'''
)
monkeypatch.setattr("time.sleep", mock_sleep)
@pytest.fixture
def monkeytime(monkeypatch):
cur_time = 1
def time():
return cur_time
def sleep(secs):
nonlocal cur_time
cur_time += secs
monkeypatch.setattr("time.time", time)
monkeypatch.setattr("time.sleep", sleep)
@pytest.fixture
def wildcard():
return Wildcard()
@pytest.fixture
def render_template(request):
"""Utility to test templates."""
template.load_templates("openlibrary")
# TODO: call setup on upstream and openlibrary plugins to
# load all globals.
web.template.Template.globals["_"] = gettext
web.template.Template.globals.update(helpers.helpers)
web.ctx.env = web.storage()
web.ctx.headers = []
web.ctx.lang = "en"
# ol_infobase.init_plugin call is failing when trying to import plugins.openlibrary.code.
# monkeypatch to avoid that.
from openlibrary.plugins import ol_infobase
init_plugin = ol_infobase.init_plugin
ol_infobase.init_plugin = lambda: None
def undo():
ol_infobase.init_plugin = init_plugin
request.addfinalizer(undo)
from openlibrary.plugins.openlibrary import code
web.config.db_parameters = {}
code.setup_template_globals()
def finalizer():
template.disktemplates.clear()
web.ctx.clear()
request.addfinalizer(finalizer)
def render(name, *a, **kw):
as_string = kw.pop("as_string", True)
d = infobase_render_template(name, *a, **kw)
return str(d) if as_string else d
return render
| 2,875 |
Python
|
.py
| 77 | 31.038961 | 93 | 0.695228 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
189 |
config.py
|
internetarchive_openlibrary/openlibrary/config.py
|
"""Utility for loading config file.
"""
import os
import sys
import yaml
import infogami
from infogami import config
from infogami.infobase import server
runtime_config = {}
def load(config_file):
"""legacy function to load openlibary config.
The loaded config will be available via runtime_config var in this module.
This doesn't affect the global config.
WARNING: This function is deprecated, please use load_config instead.
"""
if 'pytest' in sys.modules:
# During pytest, ensure we're not using something like olsystem
assert config_file == 'conf/openlibrary.yml'
# for historic reasons
global runtime_config
with open(config_file) as in_file:
runtime_config = yaml.safe_load(in_file)
def load_config(config_file):
"""Loads the config file.
The loaded config will be available via infogami.config.
"""
if 'pytest' in sys.modules:
# During pytest, ensure we're not using something like olsystem
assert config_file == 'conf/openlibrary.yml'
infogami.load_config(config_file)
setup_infobase_config(config_file)
# This sets web.config.db_parameters
server.update_config(config.infobase)
def setup_infobase_config(config_file):
"""Reads the infobase config file and assign it to config.infobase.
The config_file is used as base to resolve relative path, if specified in the config.
"""
if config.get("infobase_config_file"):
dir = os.path.dirname(config_file)
path = os.path.join(dir, config.infobase_config_file)
with open(path) as in_file:
config.infobase = yaml.safe_load(in_file)
else:
config.infobase = {}
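# Illustrative sketch (added comment, not part of the upstream module): how the
# relative-path resolution above plays out. Assuming a hypothetical
# conf/openlibrary.yml that contains the line `infobase_config_file: infobase.yml`,
# setup_infobase_config("conf/openlibrary.yml") reads conf/infobase.yml and assigns
# the parsed YAML to config.infobase; if the key is absent, config.infobase is {}.
#
#     load_config("conf/openlibrary.yml")   # also calls setup_infobase_config()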
| 1,698 |
Python
|
.py
| 44 | 33.409091 | 89 | 0.713764 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
190 |
code.py
|
internetarchive_openlibrary/openlibrary/code.py
|
"""Main entry point for openlibrary app.
Loaded from Infogami plugin mechanism.
"""
import sys
import os
import logging
import logging.config
from infogami.utils import template, macro, i18n, delegate
import infogami
old_plugins = [
"openlibrary",
"worksearch",
"inside",
"books",
"admin",
"upstream",
"importapi",
"recaptcha",
]
def setup():
setup_logging()
logger = logging.getLogger("openlibrary")
logger.info("Application init")
for p in old_plugins:
logger.info("loading plugin %s", p)
modname = "openlibrary.plugins.%s.code" % p
path = "openlibrary/plugins/" + p
template.load_templates(path, lazy=True)
macro.load_macros(path, lazy=True)
i18n.load_strings(path)
__import__(modname, globals(), locals(), ['plugins'])
delegate.plugins += [
delegate._make_plugin_module('openlibrary.plugins.' + name)
for name in old_plugins
]
load_views()
# load actions
from . import actions
logger.info("loading complete.")
def setup_logging():
"""Reads the logging configuration from config file and configures logger."""
try:
logconfig = infogami.config.get("logging_config_file")
if logconfig and os.path.exists(logconfig):
logging.config.fileConfig(logconfig, disable_existing_loggers=False)
except Exception as e:
print("Unable to set logging configuration:", str(e), file=sys.stderr)
raise
def load_views():
"""Registers all views by loading all view modules."""
from .views import showmarc
setup()
| 1,621 |
Python
|
.py
| 52 | 25.826923 | 81 | 0.675048 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
191 |
api.py
|
internetarchive_openlibrary/openlibrary/api.py
|
r"""Open Library API Client.
Sample Usage::
ol = OpenLibrary("http://0.0.0.0:8080")
ol.login('joe', 'secret')
page = ol.get("/sandbox")
print(page["body"])
page["body"] += "\n\nTest from API"
ol.save("/sandbox", page, "test from api")
"""
__version__ = "0.1"
__author__ = "Anand Chitipothu <[email protected]>"
import os
import re
import datetime
import json
import web
import logging
import requests
from configparser import ConfigParser
logger = logging.getLogger("openlibrary.api")
class OLError(Exception):
def __init__(self, e):
self.code = e.response.status_code
self.headers = e.response.headers
self.text = e.response.text
Exception.__init__(self, f"{e}. Response: {self.text}")
class OpenLibrary:
def __init__(self, base_url="https://openlibrary.org"):
self.base_url = base_url.rstrip('/') if base_url else "https://openlibrary.org"
self.cookie = None
def _request(self, path, method='GET', data=None, headers=None, params=None):
logger.info("%s %s", method, path)
url = self.base_url + path
headers = headers or {}
params = params or {}
if self.cookie:
headers['Cookie'] = self.cookie
try:
response = requests.request(
method, url, data=data, headers=headers, params=params
)
response.raise_for_status()
return response
except requests.HTTPError as e:
raise OLError(e)
def autologin(self, section=None):
"""Login to Open Library with credentials taken from ~/.olrc file.
The ~/.olrc file must be in ini format (format readable by
configparser module) and there should be a section with the
server name. A sample configuration file may look like this::
[openlibrary.org]
username = joe
password = secret
[0.0.0.0:8080]
username = joe
password = joe123
Optionally, a section name can be passed as an argument to force using a different section.
If the environment variable OPENLIBRARY_RCFILE is set, that file is read instead of ~/.olrc.
"""
config = ConfigParser()
configfile = os.getenv('OPENLIBRARY_RCFILE', os.path.expanduser('~/.olrc'))
logger.info("reading %s", configfile)
config.read(configfile)
section = section or self.base_url.split('://')[-1]
if not config.has_section(section):
raise Exception("No section found with name %s in ~/.olrc" % repr(section))
username = config.get(section, 'username')
password = config.get(section, 'password')
return self.login(username, password)
def login(self, username, password):
"""Login to Open Library with given credentials."""
headers = {'Content-Type': 'application/json'}
try:
data = json.dumps({"username": username, "password": password})
response = self._request(
'/account/login', method='POST', data=data, headers=headers
)
except OLError as e:
response = e
if 'Set-Cookie' in response.headers:
cookies = response.headers['Set-Cookie'].split(',')
self.cookie = ';'.join([c.split(';')[0] for c in cookies])
def get(self, key, v=None):
response = self._request(key + '.json', params={'v': v} if v else {})
return unmarshal(response.json())
def get_many(self, keys):
"""Get multiple documents in a single request as a dictionary."""
if len(keys) > 100:
# Process in batches to avoid crossing the URL length limit.
d = {}
for chunk in web.group(keys, 100):
d.update(self._get_many(chunk))
return d
else:
return self._get_many(keys)
def _get_many(self, keys):
response = self._request("/api/get_many", params={"keys": json.dumps(keys)})
return response.json()['result']
def save(self, key, data, comment=None):
headers = {'Content-Type': 'application/json'}
data = marshal(data)
if comment:
headers['Opt'] = '"%s/dev/docs/api"; ns=42' % self.base_url
headers['42-comment'] = comment
data = json.dumps(data)
return self._request(key, method="PUT", data=data, headers=headers).content
def _call_write(self, name, query, comment, action):
headers = {'Content-Type': 'application/json'}
query = marshal(query)
# use HTTP Extension Framework to add custom headers. see RFC 2774 for more details.
if comment or action:
headers['Opt'] = '"%s/dev/docs/api"; ns=42' % self.base_url
if comment:
headers['42-comment'] = comment
if action:
headers['42-action'] = action
response = self._request(
'/api/' + name, method="POST", data=json.dumps(query), headers=headers
)
return response.json()
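# Hedged illustration (added comment, not in the upstream file): for a call like
# ol._call_write('save_many', query, comment="fix typo", action="edit-book")
# (the argument values are hypothetical), the code above sends roughly these headers
# alongside the JSON body, using the RFC 2774 Opt header declared above:
#
#     Content-Type: application/json
#     Opt: "https://openlibrary.org/dev/docs/api"; ns=42
#     42-comment: fix typo
#     42-action: edit-book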
def save_many(self, query, comment=None, action=None):
return self._call_write('save_many', query, comment, action)
def write(self, query, comment="", action=""):
"""Internal write API."""
return self._call_write('write', query, comment, action)
def new(self, query, comment=None, action=None):
return self._call_write('new', query, comment, action)
def query(self, q=None, **kw):
"""Query Open Library.
Open Library always limits the result to 1000 items due to
performance issues. Pass limit=False to fetch all matching
results by making multiple requests to the server. Please note
that an iterator is returned instead of a list when limit=False is
passed.::
>>> ol.query({'type': '/type/type', 'limit': 2}) #doctest: +SKIP
[{'key': '/type/property'}, {'key': '/type/type'}]
>>> ol.query(type='/type/type', limit=2) #doctest: +SKIP
[{'key': '/type/property'}, {'key': '/type/type'}]
"""
q = dict(q or {})
q.update(kw)
q = marshal(q)
def unlimited_query(q):
q['limit'] = 1000
q.setdefault('offset', 0)
q.setdefault('sort', 'key')
while True:
result = self.query(q)
yield from result
if len(result) < 1000:
break
q['offset'] += len(result)
if 'limit' in q and q['limit'] is False:
return unlimited_query(q)
else:
response = self._request("/query.json", params={"query": json.dumps(q)})
return unmarshal(response.json())
def search(self, query, limit=10, offset=0, fields: list[str] | None = None):
return self._request(
'/search.json',
params={
'q': query,
'limit': limit,
'offset': offset,
**({'fields': ','.join(fields)} if fields else {}),
},
).json()
def import_ocaid(self, ocaid, require_marc=True):
data = {
'identifier': ocaid,
'require_marc': 'true' if require_marc else 'false',
}
return self._request('/api/import/ia', method='POST', data=data).text
def import_data(self, data):
return self._request('/api/import', method='POST', data=data).text
def marshal(data):
"""Serializes the specified data in the format required by OL.::
>>> marshal(datetime.datetime(2009, 1, 2, 3, 4, 5, 6789))
{'type': '/type/datetime', 'value': '2009-01-02T03:04:05.006789'}
"""
if isinstance(data, list):
return [marshal(d) for d in data]
elif isinstance(data, dict):
return {k: marshal(v) for k, v in data.items()}
elif isinstance(data, datetime.datetime):
return {"type": "/type/datetime", "value": data.isoformat()}
elif isinstance(data, Text):
return {"type": "/type/text", "value": str(data)}
elif isinstance(data, Reference):
return {"key": str(data)}
else:
return data
def unmarshal(d):
"""Converts OL serialized objects to python.::
>>> unmarshal({"type": "/type/text",
... "value": "hello, world"}) # doctest: +ALLOW_UNICODE
<text: u'hello, world'>
>>> unmarshal({"type": "/type/datetime", "value": "2009-01-02T03:04:05.006789"})
datetime.datetime(2009, 1, 2, 3, 4, 5, 6789)
"""
if isinstance(d, list):
return [unmarshal(v) for v in d]
elif isinstance(d, dict):
if 'key' in d and len(d) == 1:
return Reference(d['key'])
elif 'value' in d and 'type' in d:
if d['type'] == '/type/text':
return Text(d['value'])
elif d['type'] == '/type/datetime':
return parse_datetime(d['value'])
else:
return d['value']
else:
return {k: unmarshal(v) for k, v in d.items()}
else:
return d
def parse_datetime(value):
"""Parses ISO datetime formatted string.::
>>> parse_datetime("2009-01-02T03:04:05.006789")
datetime.datetime(2009, 1, 2, 3, 4, 5, 6789)
"""
if isinstance(value, datetime.datetime):
return value
else:
tokens = re.split(r'-|T|:|\.| ', value)
return datetime.datetime(*map(int, tokens))
class Text(str):
__slots__ = ()
def __repr__(self):
return "<text: %s>" % str.__repr__(self)
class Reference(str):
__slots__ = ()
def __repr__(self):
return "<ref: %s>" % str.__repr__(self)
| 9,714 |
Python
|
.py
| 234 | 32.474359 | 105 | 0.578724 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
192 |
app.py
|
internetarchive_openlibrary/openlibrary/app.py
|
"""Utilities to build the app.
"""
from infogami.utils import app as _app
from infogami.utils.view import render, public
from infogami.utils.macro import macro
from web.template import TemplateResult
class view(_app.page):
"""A view is a class that defines how a page or a set of pages
identified by a regular expression are rendered.
Here is a sample view::
from openlibrary import app
class hello(app.view):
path = "/hello/(.*)"
def GET(self, name):
return app.render_template("hello", name)
"""
# In infogami, the class with this functionality is called page.
# We are redefining with a slightly different terminology to make
# things more readable.
pass
# view is just a base class.
# Defining a class extending from _app.page auto-registers it inside infogami.
# Undoing that.
del _app.pages['/view']
class subview(_app.view):
"""Subviews are views that work an object in the database.
Each subview URL will have two parts, the prefix identifying the key
of the document in the database to work on and the suffix iden identifying
the action.
For example, the in the subview with URL "/works/OL123W/foo/identifiers",
"identifiers" is the action and "/works/OL123W" is the key of the document.
The middle part "foo" is added by a middleware to make the URLs readable
and not that is transparent to this.
Here is a sample subview:
class work_identifiers(delegate.view):
suffix = "identifiers"
types = ["/type/edition"]
"""
# In infogami, the class with this functionality is called a view.
# We are redefining with a slightly different terminology to make
# things more readable.
# Tell infogami not to consider this as a view class
suffix = None
types = None
@macro
@public
def render_template(name: str, *a, **kw) -> TemplateResult:
if "." in name:
name = name.rsplit(".", 1)[0]
return render[name](*a, **kw)
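# Illustrative usage (added comment), assuming a template registered as "hello":
# because render_template() strips a trailing extension before the lookup, both
# calls below resolve to render["hello"](name).
#
#     render_template("hello", name)
#     render_template("hello.html", name)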
| 2,033 |
Python
|
.py
| 50 | 35.26 | 79 | 0.695364 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
193 |
book_providers.py
|
internetarchive_openlibrary/openlibrary/book_providers.py
|
from dataclasses import dataclass
import logging
from collections.abc import Callable, Iterator
from typing import TypedDict, Literal, cast, TypeVar, Generic
from urllib import parse
import web
from web import uniq
from web.template import TemplateResult
from openlibrary.app import render_template
from openlibrary.plugins.upstream.models import Edition
from openlibrary.plugins.upstream.utils import get_coverstore_public_url
from openlibrary.utils import OrderedEnum, multisort_best
logger = logging.getLogger("openlibrary.book_providers")
AcquisitionAccessLiteral = Literal[
'sample', 'buy', 'open-access', 'borrow', 'subscribe'
]
class EbookAccess(OrderedEnum):
# Keep in sync with solr/conf/enumsConfig.xml !
NO_EBOOK = 0
UNCLASSIFIED = 1
PRINTDISABLED = 2
BORROWABLE = 3
PUBLIC = 4
def to_solr_str(self):
return self.name.lower()
@staticmethod
def from_acquisition_access(literal: AcquisitionAccessLiteral) -> 'EbookAccess':
if literal == 'sample':
# We need to update solr to handle these! Requires full reindex
return EbookAccess.PRINTDISABLED
elif literal == 'buy':
return EbookAccess.NO_EBOOK
elif literal == 'open-access':
return EbookAccess.PUBLIC
elif literal == 'borrow':
return EbookAccess.BORROWABLE
elif literal == 'subscribe':
return EbookAccess.NO_EBOOK
else:
raise ValueError(f'Unknown access literal: {literal}')
@dataclass
class Acquisition:
"""
Acquisition represents a book resource found on another website, such as
Standard Ebooks.
Wording inspired by OPDS; see https://specs.opds.io/opds-1.2#23-acquisition-feeds
"""
access: AcquisitionAccessLiteral
format: Literal['web', 'pdf', 'epub', 'audio']
price: str | None
url: str
provider_name: str | None = None
@property
def ebook_access(self) -> EbookAccess:
return EbookAccess.from_acquisition_access(self.access)
@staticmethod
def from_json(json: dict) -> 'Acquisition':
if 'href' in json:
# OPDS-style provider
return Acquisition.from_opds_json(json)
elif 'url' in json:
# We have an inconsistency in our API
html_access: dict[str, AcquisitionAccessLiteral] = {
'read': 'open-access',
'listen': 'open-access',
'buy': 'buy',
'borrow': 'borrow',
'preview': 'sample',
}
access = json.get('access', 'open-access')
if access in html_access:
access = html_access[access]
# Pressbooks/OL-style
return Acquisition(
access=access,
format=json.get('format', 'web'),
price=json.get('price'),
url=json['url'],
provider_name=json.get('provider_name'),
)
else:
raise ValueError(f'Unknown ebook acquisition format: {json}')
@staticmethod
def from_opds_json(json: dict) -> 'Acquisition':
if json.get('properties', {}).get('indirectAcquisition', None):
mimetype = json['properties']['indirectAcquisition'][0]['type']
else:
mimetype = json['type']
fmt: Literal['web', 'pdf', 'epub', 'audio'] = 'web'
if mimetype.startswith('audio/'):
fmt = 'audio'
elif mimetype == 'application/pdf':
fmt = 'pdf'
elif mimetype == 'application/epub+zip':
fmt = 'epub'
elif mimetype == 'text/html':
fmt = 'web'
else:
logger.warning(f'Unknown mimetype: {mimetype}')
fmt = 'web'
if json.get('properties', {}).get('price', None):
price = f"{json['properties']['price']['value']} {json['properties']['price']['currency']}"
else:
price = None
return Acquisition(
access=json['rel'].split('/')[-1],
format=fmt,
price=price,
url=json['href'],
provider_name=json.get('name'),
)
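# Hedged example (added comment): the two JSON shapes Acquisition.from_json()
# accepts; the URLs and provider name below are illustrative only.
#
#     # Pressbooks/OL-style record (has a "url" key):
#     Acquisition.from_json({
#         "url": "https://example.org/read/123",
#         "access": "read", "format": "web", "provider_name": "example",
#     })
#     # OPDS-style record (has an "href" key), dispatched to from_opds_json():
#     Acquisition.from_json({
#         "href": "https://example.org/book.epub",
#         "rel": "http://opds-spec.org/acquisition/open-access",
#         "type": "application/epub+zip",
#     })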
class IALiteMetadata(TypedDict):
boxid: set[str]
collection: set[str]
access_restricted_item: Literal['true', 'false'] | None
TProviderMetadata = TypeVar('TProviderMetadata')
class AbstractBookProvider(Generic[TProviderMetadata]):
short_name: str
"""
The key in the identifiers field on editions;
see https://openlibrary.org/config/edition
"""
identifier_key: str | None
def get_olids(self, identifier: str) -> list[str]:
return web.ctx.site.things(
{"type": "/type/edition", self.db_selector: identifier}
)
@property
def editions_query(self):
return {f"{self.db_selector}~": "*"}
@property
def db_selector(self) -> str:
return f"identifiers.{self.identifier_key}"
@property
def solr_key(self):
return f"id_{self.identifier_key}"
def get_identifiers(self, ed_or_solr: Edition | dict) -> list[str]:
return (
# If it's an edition
ed_or_solr.get('identifiers', {}).get(self.identifier_key, [])
or
# if it's a solr work record
ed_or_solr.get(f'id_{self.identifier_key}', [])
)
def choose_best_identifier(self, identifiers: list[str]) -> str:
return identifiers[0]
def get_best_identifier(self, ed_or_solr: Edition | dict) -> str:
identifiers = self.get_identifiers(ed_or_solr)
assert identifiers
return self.choose_best_identifier(identifiers)
def get_best_identifier_slug(self, ed_or_solr: Edition | dict) -> str:
"""Used in eg /work/OL1W?edition=ia:foobar URLs, for example"""
return f'{self.short_name}:{self.get_best_identifier(ed_or_solr)}'
def get_template_path(self, typ: Literal['read_button', 'download_options']) -> str:
return f"book_providers/{self.short_name}_{typ}.html"
def render_read_button(
self, ed_or_solr: Edition | dict, analytics_attr: Callable[[str], str]
) -> TemplateResult:
return render_template(
self.get_template_path('read_button'),
self.get_best_identifier(ed_or_solr),
analytics_attr,
)
def render_download_options(
self, edition: Edition, extra_args: list | None = None
) -> TemplateResult:
return render_template(
self.get_template_path('download_options'),
self.get_best_identifier(edition),
*(extra_args or []),
)
def is_own_ocaid(self, ocaid: str) -> bool:
"""Whether the ocaid is an archive of content from this provider"""
return False
def get_access(
self,
edition: dict,
metadata: TProviderMetadata | None = None,
) -> EbookAccess:
"""
Return the access level of the edition.
"""
# Most providers are for public-only ebooks right now
return EbookAccess.PUBLIC
def get_acquisitions(
self,
edition: Edition | web.Storage,
) -> list[Acquisition]:
if edition.providers:
return [Acquisition.from_json(dict(p)) for p in edition.providers]
else:
return []
class InternetArchiveProvider(AbstractBookProvider[IALiteMetadata]):
short_name = 'ia'
identifier_key = 'ocaid'
@property
def db_selector(self) -> str:
return self.identifier_key
@property
def solr_key(self) -> str:
return "ia"
def get_identifiers(self, ed_or_solr: Edition | dict) -> list[str]:
# Solr work record augmented with availability
# Sometimes it's set explicitly to None, for some reason
availability = ed_or_solr.get('availability', {}) or {}
if availability.get('identifier'):
return [ed_or_solr['availability']['identifier']]
# Edition
if ed_or_solr.get('ocaid'):
return [ed_or_solr['ocaid']]
# Solr work record
return ed_or_solr.get('ia', [])
def is_own_ocaid(self, ocaid: str) -> bool:
return True
def render_download_options(
self, edition: Edition, extra_args: list | None = None
) -> TemplateResult | str:
if edition.is_access_restricted():
return ''
formats = {
'pdf': edition.get_ia_download_link('.pdf'),
'epub': edition.get_ia_download_link('.epub'),
'mobi': edition.get_ia_download_link('.mobi'),
'txt': edition.get_ia_download_link('_djvu.txt'),
}
if any(formats.values()):
return render_template(
self.get_template_path('download_options'),
formats,
edition.url('/daisy'),
)
else:
return ''
def get_access(
self, edition: dict, metadata: IALiteMetadata | None = None
) -> EbookAccess:
if not metadata:
if edition.get('ocaid'):
return EbookAccess.UNCLASSIFIED
else:
return EbookAccess.NO_EBOOK
collections = metadata.get('collection', set())
access_restricted_item = metadata.get('access_restricted_item') == "true"
if 'inlibrary' in collections:
return EbookAccess.BORROWABLE
elif 'printdisabled' in collections:
return EbookAccess.PRINTDISABLED
elif access_restricted_item or not collections:
return EbookAccess.UNCLASSIFIED
else:
return EbookAccess.PUBLIC
def get_acquisitions(
self,
edition: Edition,
) -> list[Acquisition]:
return [
Acquisition(
access='open-access',
format='web',
price=None,
url=f'https://archive.org/details/{self.get_best_identifier(edition)}',
provider_name=self.short_name,
)
]
class LibriVoxProvider(AbstractBookProvider):
short_name = 'librivox'
identifier_key = 'librivox'
def render_download_options(self, edition: Edition, extra_args: list | None = None):
# The template also needs the ocaid, since some of the files are hosted on IA
return super().render_download_options(edition, [edition.get('ocaid')])
def is_own_ocaid(self, ocaid: str) -> bool:
return 'librivox' in ocaid
def get_acquisitions(
self,
edition: Edition,
) -> list[Acquisition]:
return [
Acquisition(
access='open-access',
format='audio',
price=None,
url=f'https://librivox.org/{self.get_best_identifier(edition)}',
provider_name=self.short_name,
)
]
class ProjectGutenbergProvider(AbstractBookProvider):
short_name = 'gutenberg'
identifier_key = 'project_gutenberg'
def is_own_ocaid(self, ocaid: str) -> bool:
return ocaid.endswith('gut')
def get_acquisitions(
self,
edition: Edition,
) -> list[Acquisition]:
return [
Acquisition(
access='open-access',
format='web',
price=None,
url=f'https://www.gutenberg.org/ebooks/{self.get_best_identifier(edition)}',
provider_name=self.short_name,
)
]
class StandardEbooksProvider(AbstractBookProvider):
short_name = 'standard_ebooks'
identifier_key = 'standard_ebooks'
def is_own_ocaid(self, ocaid: str) -> bool:
# Standard ebooks isn't archived on IA
return False
def get_acquisitions(
self,
edition: Edition,
) -> list[Acquisition]:
standard_ebooks_id = self.get_best_identifier(edition)
base_url = 'https://standardebooks.org/ebooks/' + standard_ebooks_id
flat_id = standard_ebooks_id.replace('/', '_')
return [
Acquisition(
access='open-access',
format='web',
price=None,
url=f'{base_url}/text/single-page',
provider_name=self.short_name,
),
Acquisition(
access='open-access',
format='epub',
price=None,
url=f'{base_url}/downloads/{flat_id}.epub',
provider_name=self.short_name,
),
]
class OpenStaxProvider(AbstractBookProvider):
short_name = 'openstax'
identifier_key = 'openstax'
def is_own_ocaid(self, ocaid: str) -> bool:
return False
def get_acquisitions(
self,
edition: Edition,
) -> list[Acquisition]:
return [
Acquisition(
access='open-access',
format='web',
price=None,
url=f'https://openstax.org/details/books/{self.get_best_identifier(edition)}',
provider_name=self.short_name,
)
]
class CitaPressProvider(AbstractBookProvider):
short_name = 'cita_press'
identifier_key = 'cita_press'
def is_own_ocaid(self, ocaid: str) -> bool:
return False
class DirectProvider(AbstractBookProvider):
short_name = 'direct'
identifier_key = None
@property
def db_selector(self) -> str:
return "providers.url"
@property
def solr_key(self) -> None:
# TODO: Not implemented yet
return None
def get_identifiers(self, ed_or_solr: Edition | dict) -> list[str]:
"""
Note: This will only work for solr records if the provider field was fetched
in the solr request (this field is populated from the db).
"""
if providers := ed_or_solr.get('providers', []):
identifiers = [
provider.url
for provider in map(Acquisition.from_json, ed_or_solr['providers'])
if provider.ebook_access >= EbookAccess.PRINTDISABLED
]
to_remove = set()
for tbp in PROVIDER_ORDER:
# Avoid infinite recursion.
if isinstance(tbp, DirectProvider):
continue
if not tbp.get_identifiers(ed_or_solr):
continue
for acq in tbp.get_acquisitions(ed_or_solr):
to_remove.add(acq.url)
return [
identifier for identifier in identifiers if identifier not in to_remove
]
else:
# TODO: Not implemented for search/solr yet
return []
def render_read_button(
self, ed_or_solr: Edition | dict, analytics_attr: Callable[[str], str]
) -> TemplateResult | str:
acq_sorted = sorted(
(
p
for p in map(Acquisition.from_json, ed_or_solr.get('providers', []))
if p.ebook_access >= EbookAccess.PRINTDISABLED
),
key=lambda p: p.ebook_access,
reverse=True,
)
if not acq_sorted:
return ''
acquisition = acq_sorted[0]
# pre-process acquisition.url so ParseResult.netloc is always the domain. Only netloc is used.
url = (
"https://" + acquisition.url
if not acquisition.url.startswith("http")
else acquisition.url
)
parsed_url = parse.urlparse(url)
domain = parsed_url.netloc
return render_template(
self.get_template_path('read_button'), acquisition, domain
)
def render_download_options(self, edition: Edition, extra_args: list | None = None):
# Return an empty string until #9581 is addressed.
return ""
def get_access(
self,
edition: dict,
metadata: TProviderMetadata | None = None,
) -> EbookAccess:
"""
Return the access level of the edition.
"""
# For now assume 0 is best
return EbookAccess.from_acquisition_access(
Acquisition.from_json(edition['providers'][0]).access
)
class WikisourceProvider(AbstractBookProvider):
short_name = 'wikisource'
identifier_key = 'wikisource'
PROVIDER_ORDER: list[AbstractBookProvider] = [
# These providers act essentially as their own publishers, so link to the first when
# we're on an edition page
DirectProvider(),
LibriVoxProvider(),
ProjectGutenbergProvider(),
StandardEbooksProvider(),
OpenStaxProvider(),
CitaPressProvider(),
WikisourceProvider(),
# Then link to IA
InternetArchiveProvider(),
]
def get_cover_url(ed_or_solr: Edition | dict) -> str | None:
"""
Get the cover url most appropriate for this edition or solr work search result
"""
size = 'M'
# Editions
if isinstance(ed_or_solr, Edition):
cover = ed_or_solr.get_cover()
return cover.url(size) if cover else None
# Solr edition
elif ed_or_solr['key'].startswith('/books/'):
if ed_or_solr.get('cover_i'):
return (
get_coverstore_public_url()
+ f'/b/id/{ed_or_solr["cover_i"]}-{size}.jpg'
)
else:
return None
# Solr document augmented with availability
availability = ed_or_solr.get('availability', {}) or {}
if availability.get('openlibrary_edition'):
olid = availability.get('openlibrary_edition')
return f"{get_coverstore_public_url()}/b/olid/{olid}-{size}.jpg"
if availability.get('identifier'):
ocaid = ed_or_solr['availability']['identifier']
return f"//archive.org/services/img/{ocaid}"
# Plain solr - we don't know which edition is which here, so this is most
# preferable
if ed_or_solr.get('cover_i'):
cover_i = ed_or_solr["cover_i"]
return f'{get_coverstore_public_url()}/b/id/{cover_i}-{size}.jpg'
if ed_or_solr.get('cover_edition_key'):
olid = ed_or_solr['cover_edition_key']
return f"{get_coverstore_public_url()}/b/olid/{olid}-{size}.jpg"
if ed_or_solr.get('ocaid'):
return f"//archive.org/services/img/{ed_or_solr.get('ocaid')}"
# No luck
return None
def is_non_ia_ocaid(ocaid: str) -> bool:
"""
Check if the ocaid "looks like" it's from another provider
"""
providers = (provider for provider in PROVIDER_ORDER if provider.short_name != 'ia')
return any(provider.is_own_ocaid(ocaid) for provider in providers)
def get_book_provider_by_name(short_name: str) -> AbstractBookProvider | None:
return next((p for p in PROVIDER_ORDER if p.short_name == short_name), None)
ia_provider = cast(InternetArchiveProvider, get_book_provider_by_name('ia'))
prefer_ia_provider_order = uniq([ia_provider, *PROVIDER_ORDER])
def get_provider_order(prefer_ia: bool = False) -> list[AbstractBookProvider]:
default_order = prefer_ia_provider_order if prefer_ia else PROVIDER_ORDER
provider_order = default_order
provider_overrides = None
# Need this to work in test environments
if 'env' in web.ctx:
provider_overrides = web.input(providerPref=None, _method='GET').providerPref
if provider_overrides:
new_order: list[AbstractBookProvider] = []
for name in provider_overrides.split(','):
if name == '*':
new_order += default_order
else:
provider = get_book_provider_by_name(name)
if not provider:
# TODO: Show the user a warning somehow
continue
new_order.append(provider)
new_order = uniq(new_order + default_order)
if new_order:
provider_order = new_order
return provider_order
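# Hedged illustration (added comment): how the providerPref override above behaves.
# A request with ?providerPref=gutenberg,* moves the Project Gutenberg provider to the
# front, followed by the default order; a literal "*" expands to the default order at
# that position, and unknown provider names are silently skipped.
#
#     get_provider_order()   # default ordering
#     # with ?providerPref=gutenberg,* in the request:
#     # -> [ProjectGutenbergProvider(), DirectProvider(), LibriVoxProvider(), ...]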
def get_book_providers(ed_or_solr: Edition | dict) -> Iterator[AbstractBookProvider]:
# On search results which don't have an edition selected, we want to display
# IA copies first.
# Issue is that an edition can be provided by multiple providers; we can easily
# choose the correct copy when on an edition, but on a solr work record, with all
# copies of all editions aggregated, it's more difficult.
# So we do some ugly ocaid sniffing to try to guess :/ Idea being that we ignore
# OCAIDs that look like they're from other providers.
has_edition = isinstance(ed_or_solr, Edition) or ed_or_solr['key'].startswith(
'/books/'
)
prefer_ia = not has_edition
if prefer_ia:
ia_ocaids = [
ocaid
# Subjects/publisher pages have ia set to a specific value :/
for ocaid in uniq(ia_provider.get_identifiers(ed_or_solr) or [])
if not is_non_ia_ocaid(ocaid)
]
prefer_ia = bool(ia_ocaids)
provider_order = get_provider_order(prefer_ia)
for provider in provider_order:
if provider.get_identifiers(ed_or_solr):
yield provider
def get_book_provider(ed_or_solr: Edition | dict) -> AbstractBookProvider | None:
return next(get_book_providers(ed_or_solr), None)
def get_best_edition(
editions: list[Edition],
) -> tuple[Edition | None, AbstractBookProvider | None]:
provider_order = get_provider_order(True)
# Map provider name to position/ranking
provider_rank_lookup: dict[AbstractBookProvider | None, int] = {
provider: i for i, provider in enumerate(provider_order)
}
# Here, we prefer the ia editions
augmented_editions = [(edition, get_book_provider(edition)) for edition in editions]
best = multisort_best(
augmented_editions,
[
# Prefer the providers closest to the top of the list
('min', lambda rec: provider_rank_lookup.get(rec[1], float('inf'))),
# Prefer the editions with the most fields
('max', lambda rec: len(dict(rec[0]))),
# TODO: Language would go in this queue somewhere
],
)
return best if best else (None, None)
def get_solr_keys() -> list[str]:
return [p.solr_key for p in PROVIDER_ORDER if p.solr_key]
setattr(get_book_provider, 'ia', get_book_provider_by_name('ia')) # noqa: B010
| 22,287 |
Python
|
.py
| 568 | 30.105634 | 103 | 0.607057 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
194 |
actions.py
|
internetarchive_openlibrary/openlibrary/actions.py
|
"""Custom OL Actions.
"""
import infogami
import sys
@infogami.action
def runmain(modulename, *args):
print("run_main", modulename, sys.argv)
mod = __import__(modulename, globals(), locals(), modulename.split("."))
mod.main(*args)
| 246 |
Python
|
.py
| 9 | 24.666667 | 76 | 0.696581 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
195 |
mapreduce.py
|
internetarchive_openlibrary/openlibrary/data/mapreduce.py
|
"""Simple library to process large datasets using map-reduce.
This works as follows:
* Takes an iterator of key-value pairs as input
* Applies the map function for each key-value pair. The map function does the
required processing to yield zero or more key-value pairs.
* The results of map are stored on disk in multiple files based on the
hash of the key. This makes sure that all the entries with the same key go to
the same file.
* Each file is sorted on key to group all the values of a key, and the
reduce function is applied for each key and its values.
* The reduced key, value pairs are returned as an iterator.
"""
import sys
import itertools
import os
import subprocess
import logging
import gzip
logger = logging.getLogger("mapreduce")
class Task:
"""Abstraction of a map-reduce task.
Each task should extend this class and implement map and reduce functions.
"""
def __init__(self, tmpdir="/tmp/mapreduce", filecount=100, hashfunc=None):
self.tmpdir = tmpdir
self.filecount = filecount
self.hashfunc = hashfunc
def map(self, key, value):
"""Function to map given key-value pair into zero or more key-value pairs.
The implementation should yield the key-value pairs.
"""
raise NotImplementedError()
def reduce(self, key, values):
"""Function to reduce given values.
The implementation should return a key-value pair, with the reduced value.
"""
raise NotImplementedError()
def read(self):
for line in sys.stdin:
key, value = line.strip().split("\t", 1)
yield key, value
def map_all(self, records, disk):
for key, value in records:
for k, v in self.map(key, value):
disk.write(k, v)
disk.close()
def reduce_all(self, records):
for key, chunk in itertools.groupby(records, lambda record: record[0]):
values = [value for key, value in chunk]
yield self.reduce(key, values)
def process(self, records):
"""Takes key-value pairs, applies map-reduce and returns the resultant key-value pairs."""
# Map the record and write to disk
disk = Disk(self.tmpdir, mode="w", hashfunc=self.hashfunc)
self.map_all(records, disk)
disk.close()
# Read from the disk in the sorted order and reduce
disk = Disk(self.tmpdir, mode="r", hashfunc=self.hashfunc)
records = disk.read_semisorted()
return self.reduce_all(records)
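# Hedged sketch (added comment, not part of the original module): a minimal Task
# subclass of the kind the module docstring describes; the names and the
# word-count logic are illustrative only.
#
#     class WordCountTask(Task):
#         def map(self, key, value):
#             for word in value.split():
#                 yield word, "1"
#         def reduce(self, key, values):
#             return key, str(len(values))
#
#     # records is an iterator of (key, value) string pairs:
#     results = WordCountTask(tmpdir="/tmp/mapreduce-example").process(records)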
class Disk:
"""Map Reduce Disk to manage key values.
The data is stored across multiple files based on the key. All records with the same key will fall in the same file.
"""
def __init__(self, dir, prefix="shard", filecount=100, hashfunc=None, mode="r"):
self.dir = dir
self.prefix = prefix
self.hashfunc = hashfunc or (lambda key: hash(key))
self.buffersize = 1024 * 1024
if not os.path.exists(dir):
os.makedirs(dir)
self.files = [self.openfile(i, mode) for i in range(filecount)]
def openfile(self, index, mode):
filename = "%s-%03d.txt.gz" % (self.prefix, index)
path = os.path.join(self.dir, filename)
return gzip.open(path, mode)
def write(self, key, value):
index = self.hashfunc(key) % len(self.files)
f = self.files[index]
f.write(key + "\t" + value + "\n")
def close(self):
for f in self.files:
f.close()
def read_semisorted(self):
"""Sorts each file in the disk and returns an iterator over the key-values in each file.
All the values with same key will come together as each file is sorted, but there is no guaranty on the global order of keys.
"""
for f in self.files:
cmd = "gzip -cd %s | sort -S1G" % f.name
logger.info(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for line in p.stdout:
key, value = line.split("\t", 1)
yield key, value
status = p.wait()
if status != 0:
raise Exception("sort failed with status %d" % status)
| 4,222 |
Python
|
.py
| 97 | 35.773196 | 133 | 0.642997 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
196 |
db.py
|
internetarchive_openlibrary/openlibrary/data/db.py
|
#!/usr/bin/env python
"""Library to provide fast access to Open Library database.
How to use:
from openlibrary.data import db
db.setup_database(db='openlibrary', user='anand', pw='')
db.setup_memcache(['host1:port1', 'host2:port2'])
# get a set of docs
docs = db.get_docs(['/sandbox'])
# get all books
books = db.iterdocs(type="/type/edition")
# update docs
db.update_docs(docs)
Each doc is a storage object with "id", "key", "revision" and "data".
"""
from openlibrary.utils import olmemcache
import json
import web
import datetime
import sys
import time
__all__ = [
"setup_database",
"setup_memcache",
"longquery",
"iterdocs",
# "get_docs", # "get_docs()" is not defined.
"update_docs",
]
db_parameters = None
db = None
mc = None
def setup_database(**db_params):
"""Setup the database. This must be called before using any other functions in this module."""
global db, db_parameters
db_params.setdefault('dbn', 'postgres')
db = web.database(**db_params)
db.printing = False
db_parameters = db_params
def setup_memcache(servers):
"""Setup the memcached servers.
This must be called along with setup_database, if memcached servers are used in the system.
"""
global mc
mc = olmemcache.Client(servers)
def iterdocs(type=None):
"""Returns an iterator over all docs in the database.
If type is specified, then only docs of that type will be returned.
"""
q = 'SELECT id, key, latest_revision as revision FROM thing'
if type:
type_id = get_thing_id(type)
q += ' WHERE type=$type_id'
q += ' ORDER BY id'
for chunk in longquery(q, locals()):
docs = chunk
_fill_data(docs)
yield from docs
def longquery(query, vars, chunk_size=10000):
"""Execute an expensive query using db cursors.
USAGE:
for chunk in longquery("SELECT * FROM bigtable"):
for row in chunk:
print row
"""
# A DB cursor is valid only inside a transaction.
# Create a new database connection to avoid this transaction interfering with the application code.
db = web.database(**db_parameters)
db.printing = False
tx = db.transaction()
try:
db.query("DECLARE longquery NO SCROLL CURSOR FOR " + query, vars=vars)
while True:
chunk = db.query(
"FETCH FORWARD $chunk_size FROM longquery", vars=locals()
).list()
if chunk:
yield chunk
else:
break
finally:
tx.rollback()
def _fill_data(docs):
"""Add `data` to all docs by querying memcache/database."""
def get(keys):
if not keys:
return []
return db.query(
"SELECT thing.id, thing.key, data.revision, data.data"
" FROM thing, data"
" WHERE thing.id = data.thing_id"
" AND thing.latest_revision = data.revision"
" AND key in $keys",
vars=locals(),
)
keys = [doc.key for doc in docs]
d = mc and mc.get_multi(keys) or {}
debug(f"{len(d)}/{len(keys)} found in memcache")
keys = [doc.key for doc in docs if doc.key not in d]
for row in get(keys):
d[row.key] = row.data
for doc in docs:
doc.data = json.loads(d[doc.key])
return docs
def read_docs(keys, for_update=False):
"""Read the docs the docs from DB."""
if not keys:
return []
debug("BEGIN SELECT")
q = "SELECT thing.id, thing.key, thing.latest_revision as revision FROM thing WHERE key IN $keys"
if for_update:
q += " FOR UPDATE"
docs = db.query(q, vars=locals())
docs = docs.list()
debug("END SELECT")
_fill_data(docs)
return docs
def update_docs(docs, comment, author, ip="127.0.0.1"):
"""Updates the given docs in the database by writing all the docs in a chunk.
This doesn't update the index tables. Avoid this function if you have any change that requires updating the index tables.
"""
now = datetime.datetime.utcnow()
author_id = get_thing_id(author)
t = db.transaction()
try:
docdict = {doc.id: doc for doc in docs}
thing_ids = list(docdict)
# lock the rows in the table
rows = db.query(
"SELECT id, key, latest_revision FROM thing where id IN $thing_ids FOR UPDATE",
vars=locals(),
)
# update revision and last_modified in each document
for row in rows:
doc = docdict[row.id]
doc.revision = row.latest_revision + 1
doc.data['revision'] = doc.revision
doc.data['latest_revision'] = doc.revision
doc.data['last_modified']['value'] = now.isoformat()
tx_id = db.insert(
"transaction",
author_id=author_id,
action="bulk_update",
ip="127.0.0.1",
created=now,
comment=comment,
)
debug("INSERT version")
db.multiple_insert(
"version",
[
{"thing_id": doc.id, "transaction_id": tx_id, "revision": doc.revision}
for doc in docs
],
seqname=False,
)
debug("INSERT data")
data = [
web.storage(
thing_id=doc.id, revision=doc.revision, data=json.dumps(doc.data)
)
for doc in docs
]
db.multiple_insert("data", data, seqname=False)
debug("UPDATE thing")
db.query(
"UPDATE thing set latest_revision=latest_revision+1 WHERE id IN $thing_ids",
vars=locals(),
)
except:
t.rollback()
debug("ROLLBACK")
raise
else:
t.commit()
debug("COMMIT")
mapping = {doc.key: d.data for doc, d in zip(docs, data)}
mc and mc.set_multi(mapping)
debug("MC SET")
def debug(*a):
print(time.asctime(), a, file=sys.stderr)
@web.memoize
def get_thing_id(key):
return db.query("SELECT * FROM thing WHERE key=$key", vars=locals())[0].id
| 6,133 |
Python
|
.py
| 183 | 25.95082 | 125 | 0.598307 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
197 |
sitemap.py
|
internetarchive_openlibrary/openlibrary/data/sitemap.py
|
"""Library for generating sitemaps from Open Library dump.
Input for generating sitemaps is a tsv file with "path", "title", "created"
and "last_modified" columns. It is desirable that the file is sorted on
"created" and "path".
http://www.archive.org/download/ol-sitemaps/sitemap-books-0001.xml.gz
http://www.archive.org/download/ol-sitemaps/sitindex-books.xml.gz
http://www.archive.org/download/ol-sitemaps/sitindex-authors.xml.gz
http://www.archive.org/download/ol-sitemaps/sitindex-works.xml.gz
http://www.archive.org/download/ol-sitemaps/sitindex-subjects.xml.gz
"""
import sys
import os
import web
import datetime
from gzip import open as gzopen
from openlibrary.plugins.openlibrary.processors import urlsafe
t = web.template.Template
t_sitemap = t(
"""$def with (docs)
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
$for path, title, created, last_modified in docs:
<url><loc>http://openlibrary.org$path</loc><lastmod>${last_modified}Z</lastmod></url>
</urlset>
"""
)
t_siteindex = t(
"""$def with (base_url, rows)
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
$for filename, timestamp in rows:
<sitemap><loc>$base_url/$filename</loc><lastmod>$timestamp</lastmod></sitemap>
</sitemapindex>
"""
)
t_html_layout = t(
"""$def with (page)
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="robots" content="noindex,follow" />
<link href="/css/all.css" rel="stylesheet" type="text/css" />
<title>$page.title</title>
</head>
<body id="edit">
<div id="background">
<div id="placement">
<div id="position">$:page</div>
</div>
</div>
</body></html>"""
)
t_html_sitemap = t(
"""$def with (back, docs)
$var title: Index
<p><a href="$back">← Back to Index</a></p>
<ul>
$for path, title in docs:
<li><a href="$path">$title</a></li>
</ul>
"""
)
def gzwrite(path, data):
f = gzopen(path, 'w')
f.write(data)
f.close()
def write_sitemaps(data, outdir, prefix):
timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
# maximum permitted entries in one sitemap is 50K.
for i, rows in enumerate(web.group(data, 50000)):
filename = "sitemap_%s_%04d.xml.gz" % (prefix, i)
print("generating", filename, file=sys.stderr)
sitemap = web.safestr(t_sitemap(rows))
path = os.path.join(outdir, filename)
gzwrite(path, sitemap)
yield filename, timestamp
def write_siteindex(data, outdir, prefix):
rows = write_sitemaps(data, outdir, prefix)
base_url = "http://openlibrary.org/static/sitemaps/"
filename = "siteindex_%s.xml.gz" % prefix
print("generating", filename, file=sys.stderr)
path = os.path.join(outdir, filename)
siteindex = web.safestr(t_siteindex(base_url, rows))
gzwrite(path, siteindex)
def parse_index_file(index_file):
data = (line.strip().split("\t") for line in open(index_file))
data = ([t[0], " ".join(t[1:-2]), t[-2], t[-1]] for t in data)
return data
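# Hedged example (added comment): one tab-separated line of the expected index file
# and what parse_index_file() yields for it; the path and title are illustrative only.
#
#     "/books/OL1M/Some_title\tSome title\t2008-04-01T03:28:50\t2010-04-14T02:48:31"
#     -> ['/books/OL1M/Some_title', 'Some title', '2008-04-01T03:28:50', '2010-04-14T02:48:31']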
def generate_sitemaps(index_file, outdir, prefix):
data = parse_index_file(index_file)
write_siteindex(data, outdir, prefix)
def mkdir_p(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def write(path, data):
print("writing", path)
mkdir_p(os.path.dirname(path))
f = open(path, "w")
f.write(data)
f.close()
def dirindex(dir, back=".."):
data = [(f, f) for f in sorted(os.listdir(dir))]
index = t_html_layout(t_html_sitemap(back, data))
path = dir + "/index.html"
write(path, web.safestr(index))
def generate_html_index(index_file, outdir):
data = parse_index_file(index_file)
data = ((d[0], d[1]) for d in data)
for i, chunk in enumerate(web.group(data, 1000)):
back = ".."
index = t_html_layout(t_html_sitemap(back, chunk))
path = outdir + "/%02d/%05d.html" % (i // 1000, i)
write(path, web.safestr(index))
for f in os.listdir(outdir):
path = os.path.join(outdir, f)
if os.path.isdir(path):
dirindex(path)
dirindex(outdir, back=".")
| 4,412 |
Python
|
.py
| 121 | 32.68595 | 89 | 0.676547 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
198 |
__init__.py
|
internetarchive_openlibrary/openlibrary/data/__init__.py
|
"""Library for managing Open Library data"""
import json
from openlibrary.data.dump import pgdecode
def parse_data_table(filename):
"""Parses the dump of data table and returns an iterator with
<key, type, revision, json> for all entries.
"""
for line in open(filename):
thing_id, revision, json_data = pgdecode(line).strip().split("\t")
d = json.loads(json_data)
yield d['key'], d['type']['key'], str(d['revision']), json_data
| 472 |
Python
|
.py
| 11 | 37.909091 | 74 | 0.671772 |
internetarchive/openlibrary
| 5,078 | 1,311 | 956 |
AGPL-3.0
|
9/5/2024, 5:07:13 PM (Europe/Amsterdam)
|
199 |
dump.py
|
internetarchive_openlibrary/openlibrary/data/dump.py
|
"""Library for generating and processing Open Library data dumps.
Glossary:
* dump - Dump of latest revisions of all documents.
* cdump - Complete dump. Dump of all revisions of all documents.
* idump - Incremental dump. Dump of all revisions created in the given day.
"""
import gzip
import itertools
import json
import logging
import os
import re
import sys
from datetime import datetime
import web
from openlibrary.data import db
from openlibrary.data.sitemap import generate_html_index, generate_sitemaps
from openlibrary.plugins.openlibrary.processors import urlsafe
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
def log(*args) -> None:
args_str = " ".join(str(a) for a in args)
msg = f"{datetime.now():%Y-%m-%d %H:%M:%S} [openlibrary.dump] {args_str}"
logger.info(msg)
print(msg, file=sys.stderr)
def print_dump(json_records, filter=None):
"""Print the given json_records in the dump format."""
start_time = datetime.now()
for i, raw_json_data in enumerate(json_records):
if i % 1_000_000 == 0:
log(f"print_dump {i:,}")
d = json.loads(raw_json_data)
d.pop("id", None)
d = _process_data(d)
key = web.safestr(d["key"])
# skip user pages
if key.startswith("/people/") and not re.match(
r"^/people/[^/]+/lists/OL\d+L$", key
):
continue
# skip admin pages
if key.startswith("/admin/"):
continue
# skip obsolete pages. Obsolete pages include volumes, scan_records and users
# marked as spam.
if key.startswith(("/b/", "/scan", "/old/")) or not key.startswith("/"):
continue
if filter and not filter(d):
continue
type_key = d["type"]["key"]
timestamp = d["last_modified"]["value"]
json_data = json.dumps(d)
print("\t".join([type_key, key, str(d["revision"]), timestamp, json_data]))
minutes = (datetime.now() - start_time).seconds // 60
log(f" print_dump() processed {i:,} records in {minutes:,} minutes.")
def read_data_file(filename: str, max_lines: int = 0):
"""
max_lines allows us to test the process with a subset of all records.
Setting max_lines to 0 will processes all records.
"""
start_time = datetime.now()
log(f"read_data_file({filename}, max_lines={max_lines if max_lines else 'all'})")
for i, line in enumerate(xopen(filename, "rt")):
thing_id, revision, json_data = line.strip().split("\t")
yield pgdecode(json_data)
if max_lines and i >= max_lines:
break
minutes = (datetime.now() - start_time).seconds // 60
log(f"read_data_file() processed {i:,} records in {minutes:,} minutes.")
def xopen(path: str, mode: str):
if path.endswith(".gz"):
return gzip.open(path, mode)
else:
return open(path, mode)
def read_tsv(file, strip=True):
"""Read a tab separated file and return an iterator over rows."""
start_time = datetime.now()
log(f"read_tsv({file})")
if isinstance(file, str):
file = xopen(file, "rt")
for i, line in enumerate(file):
if i % 1_000_000 == 0:
log(f"read_tsv {i:,}")
if strip:
line = line.strip()
yield line.split("\t")
minutes = (datetime.now() - start_time).seconds // 60
log(f" read_tsv() processed {i:,} records in {minutes:,} minutes.")
def generate_cdump(data_file, date=None):
"""Generates cdump from a copy of data table. If date is specified, only revisions
created on or before that date will be considered.
"""
# adding Z to the date will make sure all the timestamps are less than that date.
#
# >>> "2010-05-17T10:20:30" < "2010-05-17"
# False
# >>> "2010-05-17T10:20:30" < "2010-05-17Z"
# True
#
# If scripts/oldump.sh has exported $OLDUMP_TESTING then save a lot of time by only
# processing a subset of the lines in data_file.
log(f"generate_cdump({data_file}, {date}) reading")
max_lines = 1_000_000 if os.getenv("OLDUMP_TESTING") else 0 # 0 means unlimited.
filter = date and (lambda doc: doc["last_modified"]["value"] < date + "Z")
print_dump(read_data_file(data_file, max_lines), filter=filter)
def sort_dump(dump_file=None, tmpdir="/tmp/", buffer_size="1G"):
"""Sort the given dump based on key."""
start_time = datetime.now()
tmpdir = os.path.join(tmpdir, "oldumpsort")
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
M = 1024 * 1024
filenames = [os.path.join(tmpdir, "%02x.txt.gz" % i) for i in range(256)]
files = [gzip.open(f, "wb") for f in filenames]
stdin = xopen(dump_file, "rb") if dump_file else sys.stdin.buffer
# split the file into 256 chunks using hash of key
log("sort_dump", dump_file or "stdin")
for i, line in enumerate(stdin):
if i % 1_000_000 == 0:
log(f"sort_dump {i:,}")
type, key, revision, timestamp, json_data = line.strip().split(b"\t")
findex = hash(key) % 256
files[findex].write(line)
for f in files:
f.flush()
f.close()
files = []
for fname in filenames:
log("sort_dump", fname)
status = os.system(
"gzip -cd %(fname)s | sort -S%(buffer_size)s -k2,3" % locals()
)
if status != 0:
raise Exception("sort failed with status %d" % status)
minutes = (datetime.now() - start_time).seconds // 60
log(f"sort_dump() processed {i:,} records in {minutes:,} minutes.")
def generate_dump(cdump_file=None):
"""Generate dump from cdump.
The given cdump must be sorted by key.
"""
def process(data):
revision = lambda cols: int(cols[2]) # noqa: E731
for key, rows in itertools.groupby(data, key=lambda cols: cols[1]):
row = max(rows, key=revision)
yield row
start_time = datetime.now()
tjoin = "\t".join
data = read_tsv(cdump_file or sys.stdin, strip=False)
# group by key and find the max by revision
sys.stdout.writelines(tjoin(row) for row in process(data))
minutes = (datetime.now() - start_time).seconds // 60
log(f"generate_dump({cdump_file}) ran in {minutes:,} minutes.")
def generate_idump(day, **db_parameters):
"""Generate incremental dump for the given day."""
db.setup_database(**db_parameters)
rows = db.longquery(
"SELECT data.* FROM data, version, transaction "
" WHERE data.thing_id=version.thing_id"
" AND data.revision=version.revision"
" AND version.transaction_id=transaction.id"
" AND transaction.created >= $day"
" AND transaction.created < date $day + interval '1 day'"
" ORDER BY transaction.created",
vars=locals(),
chunk_size=10_000,
)
print_dump(row.data for chunk in rows for row in chunk)
def split_dump(dump_file=None, format="oldump_%s.txt"):
"""Split dump into authors, editions, works, redirects, and other."""
log(f"split_dump({dump_file}, format={format})")
start_time = datetime.now()
types = (
"/type/edition",
"/type/author",
"/type/work",
"/type/redirect",
"/type/delete",
"/type/list",
)
files = {}
files['other'] = xopen(format % 'other', 'wt')
for t in types:
tname = t.split("/")[-1] + "s"
files[t] = xopen(format % tname, "wt")
stdin = xopen(dump_file, "rt") if dump_file else sys.stdin
for i, line in enumerate(stdin):
if i % 1_000_000 == 0:
log(f"split_dump {i:,}")
type, rest = line.split("\t", 1)
if type in files:
files[type].write(line)
else:
files['other'].write(line)
for f in files.values():
f.close()
minutes = (datetime.now() - start_time).seconds // 60
log(f"split_dump() processed {i:,} records in {minutes:,} minutes.")
def make_index(dump_file):
"""Make index with "path", "title", "created" and "last_modified" columns."""
log(f"make_index({dump_file})")
start_time = datetime.now()
for i, line in enumerate(read_tsv(dump_file)):
type, key, revision, timestamp, json_data = line
data = json.loads(json_data)
if type in ("/type/edition", "/type/work"):
title = data.get("title", "untitled")
path = key + "/" + urlsafe(title)
elif type in ("/type/author", "/type/list"):
title = data.get("name", "unnamed")
path = key + "/" + urlsafe(title)
else:
title = data.get("title", key)
path = key
title = title.replace("\t", " ")
if "created" in data:
created = data["created"]["value"]
else:
created = "-"
print("\t".join([web.safestr(path), web.safestr(title), created, timestamp]))
minutes = (datetime.now() - start_time).seconds // 60
log(f"make_index() processed {i:,} records in {minutes:,} minutes.")
def _process_key(key):
mapping = {
"/l/": "/languages/",
"/a/": "/authors/",
"/b/": "/books/",
"/user/": "/people/",
}
for old, new in mapping.items():
if key.startswith(old):
return new + key[len(old) :]
return key
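# Editorial sketch (not part of the original module): how the prefix mapping
# above behaves. Legacy short keys are rewritten; anything else passes through.
def _demo_process_key() -> None:
    assert _process_key('/b/OL1M') == '/books/OL1M'
    assert _process_key('/a/OL1A') == '/authors/OL1A'
    assert _process_key('/works/OL1W') == '/works/OL1W'  # no legacy prefix, unchanged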
def _process_data(data):
"""Convert keys from /a/, /b/, /l/ and /user/
to /authors/, /books/, /languages/ and /people/ respectively."""
if isinstance(data, list):
return [_process_data(d) for d in data]
elif isinstance(data, dict):
if "key" in data:
data["key"] = _process_key(data["key"])
# convert date to ISO format
if data.get("type") == "/type/datetime":
data["value"] = data["value"].replace(" ", "T")
return {k: _process_data(v) for k, v in data.items()}
else:
return data
def _make_sub(d):
"""Make substituter.
>>> f = _make_sub(dict(a='aa', bb='b'))
>>> f('aabbb')
'aaaabb'
"""
def f(a):
return d[a.group(0)]
rx = re.compile("|".join(re.escape(key) for key in d))
return lambda s: s and rx.sub(f, s)
_pgdecode_dict = {r"\n": "\n", r"\r": "\r", r"\t": "\t", r"\\": "\\"}
_pgdecode = _make_sub(_pgdecode_dict)
def pgdecode(text):
r"""Decode postgres encoded text.
>>> pgdecode('\\n')
'\n'
"""
return _pgdecode(text)
def main(cmd, args):
"""Command Line interface for generating dumps."""
iargs = iter(args)
args = []
kwargs = {}
for a in iargs:
if a.startswith("--"):
name = a[2:].replace("-", "_")
value = next(iargs)
kwargs[name] = value
else:
args.append(a)
func = {
"cdump": generate_cdump,
"dump": generate_dump,
"idump": generate_idump,
"sort": sort_dump,
"split": split_dump,
"index": make_index,
"sitemaps": generate_sitemaps,
"htmlindex": generate_html_index,
}.get(cmd)
if func:
func(*args, **kwargs)
else:
log(f"Unknown command: {cmd}")
logger.error(f"Unknown command: {cmd}")
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2:])
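# Editorial sketch (not in the original file): main() above maps each "--flag value"
# pair to a keyword argument, converting dashes to underscores. The file names below
# are hypothetical, assuming this module is invoked directly as a script:
#
#   python dump.py cdump data.txt.gz 2024-08-01            # generate_cdump('data.txt.gz', '2024-08-01')
#   python dump.py sort ol_cdump.txt.gz --buffer-size 2G   # sort_dump('ol_cdump.txt.gz', buffer_size='2G')
#   python dump.py split ol_dump.txt.gz --format oldump_%s.txt.gz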
| 11,275 | Python | .py | 293 | 31.580205 | 87 | 0.592538 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 200 | solr_types.py | internetarchive_openlibrary/openlibrary/solr/solr_types.py |
# This file is auto-generated by types_generator.py
# fmt: off
from typing import Literal, TypedDict, Optional
class SolrDocument(TypedDict):
key: str
type: Literal['work', 'author', 'subject']
redirects: Optional[list[str]]
has_fulltext: Optional[bool]
title: Optional[str]
title_suggest: Optional[str]
title_sort: Optional[str]
subtitle: Optional[str]
alternative_title: Optional[list[str]]
alternative_subtitle: Optional[list[str]]
edition_count: Optional[int]
edition_key: Optional[list[str]]
cover_edition_key: Optional[str]
by_statement: Optional[list[str]]
publish_date: Optional[list[str]]
publish_year: Optional[list[int]]
first_publish_year: Optional[int]
first_edition: Optional[str]
first_publisher: Optional[str]
language: Optional[list[str]]
number_of_pages_median: Optional[int]
lccn: Optional[list[str]]
ia: Optional[list[str]]
ia_box_id: Optional[list[str]]
ia_loaded_id: Optional[list[str]]
ia_count: Optional[int]
ia_collection: Optional[list[str]]
oclc: Optional[list[str]]
isbn: Optional[list[str]]
ebook_access: Optional[Literal['no_ebook', 'unclassified', 'printdisabled', 'borrowable', 'public']]
lcc: Optional[list[str]]
lcc_sort: Optional[str]
ddc: Optional[list[str]]
ddc_sort: Optional[str]
contributor: Optional[list[str]]
publish_place: Optional[list[str]]
publisher: Optional[list[str]]
format: Optional[list[str]]
publisher_facet: Optional[list[str]]
first_sentence: Optional[list[str]]
author_key: Optional[list[str]]
author_name: Optional[list[str]]
author_alternative_name: Optional[list[str]]
author_facet: Optional[list[str]]
subject: Optional[list[str]]
subject_facet: Optional[list[str]]
subject_key: Optional[list[str]]
place: Optional[list[str]]
place_facet: Optional[list[str]]
place_key: Optional[list[str]]
person: Optional[list[str]]
person_facet: Optional[list[str]]
person_key: Optional[list[str]]
time: Optional[list[str]]
time_facet: Optional[list[str]]
time_key: Optional[list[str]]
ratings_average: Optional[float]
ratings_sortable: Optional[float]
ratings_count: Optional[int]
ratings_count_1: Optional[int]
ratings_count_2: Optional[int]
ratings_count_3: Optional[int]
ratings_count_4: Optional[int]
ratings_count_5: Optional[int]
readinglog_count: Optional[int]
want_to_read_count: Optional[int]
currently_reading_count: Optional[int]
already_read_count: Optional[int]
osp_count: Optional[int]
text: Optional[list[str]]
seed: Optional[list[str]]
name: Optional[str]
name_str: Optional[str]
alternate_names: Optional[list[str]]
birth_date: Optional[str]
death_date: Optional[str]
date: Optional[str]
work_count: Optional[int]
top_work: Optional[str]
top_subjects: Optional[list[str]]
subject_type: Optional[str]
public_scan_b: Optional[bool]
printdisabled_s: Optional[str]
lending_edition_s: Optional[str]
ia_collection_s: Optional[str]
ebook_count_i: Optional[int]
# fmt: on
| 3,176 | Python | .py | 91 | 30.087912 | 104 | 0.703115 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 201 | query_utils.py | internetarchive_openlibrary/openlibrary/solr/query_utils.py |
from typing import Literal, Optional
from collections.abc import Callable
from luqum.parser import parser
from luqum.tree import Item, SearchField, BaseOperation, Group, Word, Unary
import re
class EmptyTreeError(Exception):
pass
def luqum_remove_child(child: Item, parents: list[Item]):
"""
Removes a child from a luqum parse tree. If the tree
ends up being empty, errors.
:param child: Node to remove
:param parents: Path of parent nodes leading from the root of the tree
"""
parent = parents[-1] if parents else None
if parent is None:
# We cannot remove the element if it is the root of the tree
raise EmptyTreeError()
elif isinstance(parent, (BaseOperation, Group, Unary)):
new_children = tuple(c for c in parent.children if c != child)
if not new_children:
# If we have deleted all the children, we need to delete the parent
# as well. And potentially recurse up the tree.
luqum_remove_child(parent, parents[:-1])
else:
parent.children = new_children
else:
raise NotImplementedError(
f"Not implemented for Item subclass: {parent.__class__.__name__}"
)
def luqum_replace_child(parent: Item, old_child: Item, new_child: Item):
"""
Replaces a child in a luqum parse tree.
"""
if isinstance(parent, (BaseOperation, Group, Unary)):
new_children = tuple(
new_child if c == old_child else c for c in parent.children
)
parent.children = new_children
else:
raise ValueError("Not supported for generic class Item")
def luqum_traverse(item: Item, _parents: list[Item] | None = None):
"""
Traverses every node in the parse tree in depth-first order.
Does not make any guarantees about what will happen if you
modify the tree while traversing it 😅 But we do it anyways.
:param item: Node to traverse
:param _parents: Internal parameter for tracking parents
"""
parents = _parents or []
yield item, parents
new_parents = [*parents, item]
for child in item.children:
yield from luqum_traverse(child, new_parents)
def escape_unknown_fields(
query: str,
is_valid_field: Callable[[str], bool],
lower=True,
) -> str:
"""
Escapes the colon of any search field that is not deemed valid by the
predicate function `is_valid_field`.
:param query: Query to escape
:param is_valid_field: Predicate function that determines if a field is valid
:param lower: If true, the field will be lowercased before being checked
>>> escape_unknown_fields('title:foo', lambda field: False)
'title\\\\:foo'
>>> escape_unknown_fields('title:foo bar blah:bar baz:boo', lambda field: False)
'title\\\\:foo bar blah\\\\:bar baz\\\\:boo'
>>> escape_unknown_fields('title:foo bar', {'title'}.__contains__)
'title:foo bar'
>>> escape_unknown_fields('title:foo bar baz:boo', {'title'}.__contains__)
'title:foo bar baz\\\\:boo'
>>> escape_unknown_fields('title:foo bar baz:boo', {'TITLE'}.__contains__, lower=False)
'title\\\\:foo bar baz\\\\:boo'
>>> escape_unknown_fields('hi', {'title'}.__contains__)
'hi'
>>> escape_unknown_fields('(title:foo) OR (blah:bah)', {'title'}.__contains__)
'(title:foo) OR (blah\\\\:bah)'
"""
tree = parser.parse(query)
# Note we use the string of the tree, because it strips spaces
# like: "title : foo" -> "title:foo"
escaped_query = str(tree)
offset = 0
for sf, _ in luqum_traverse(tree):
if isinstance(sf, SearchField) and not is_valid_field(
sf.name.lower() if lower else sf.name
):
field = sf.name + r'\:'
if hasattr(sf, 'head'):
# head and tail are used for whitespace between fields;
                # copy it along to the right place to avoid things smashing
# together
field = sf.head + field
# We will be moving left to right, so we need to adjust the offset
# to account for the characters we have already replaced
escaped_query = (
escaped_query[: sf.pos + offset]
+ field
+ escaped_query[sf.pos + len(field) - 1 + offset :]
)
offset += 1
return escaped_query
def fully_escape_query(query: str) -> str:
"""
Try to convert a query to basically a plain lucene string.
>>> fully_escape_query('title:foo')
'title\\\\:foo'
>>> fully_escape_query('title:foo bar')
'title\\\\:foo bar'
>>> fully_escape_query('title:foo (bar baz:boo)')
'title\\\\:foo \\\\(bar baz\\\\:boo\\\\)'
>>> fully_escape_query('x:[A TO Z}')
'x\\\\:\\\\[A TO Z\\\\}'
>>> fully_escape_query('foo AND bar')
'foo and bar'
>>> fully_escape_query("foo's bar")
"foo\\\\'s bar"
"""
escaped = query
# Escape special characters
escaped = re.sub(r'[\[\]\(\)\{\}:"\-+?~^/\\,\']', r'\\\g<0>', escaped)
# Remove boolean operators by making them lowercase
escaped = re.sub(r'AND|OR|NOT', lambda _1: _1.group(0).lower(), escaped)
return escaped
def luqum_parser(query: str) -> Item:
"""
Parses a lucene-like query, with the special binding rules of Open Library.
In our queries, unlike native solr/lucene, field names are greedy, and
affect the rest of the query until another field is hit.
Here are some examples. The first query is the native solr/lucene
parsing. The second is the parsing we want.
Query : title:foo bar
Lucene: (title:foo) bar
OL : (title:foo bar)
Query : title:foo OR bar AND author:blah
Lucene: (title:foo) OR (bar) AND (author:blah)
OL : (title:foo OR bar) AND (author:blah)
This requires an annoying amount of manipulation of the default
Luqum parser, unfortunately.
Also, OL queries allow spaces after fields.
"""
tree = parser.parse(query)
def find_next_word(item: Item) -> tuple[Word, BaseOperation | None] | None:
if isinstance(item, Word):
return item, None
elif isinstance(item, BaseOperation) and isinstance(item.children[0], Word):
return item.children[0], item
else:
return None
for node, parents in luqum_traverse(tree):
if isinstance(node, BaseOperation):
# if any of the children are SearchField followed by one or more words,
# we bundle them together
            last_sf: SearchField | None = None
to_rem = []
for child in node.children:
if isinstance(child, SearchField) and isinstance(child.expr, Word):
last_sf = child
elif last_sf and (next_word := find_next_word(child)):
word, parent_op = next_word
# Add it over
if not isinstance(last_sf.expr, Group):
last_sf.expr = Group(type(node)(last_sf.expr, word))
last_sf.expr.tail = word.tail
word.tail = ''
else:
last_sf.expr.expr.children[-1].tail = last_sf.expr.tail
last_sf.expr.expr.children += (word,)
last_sf.expr.tail = word.tail
word.tail = ''
if parent_op:
                        # A query like: 'title:foo blah OR author:bar'
# Lucene parses as: (title:foo) ? (blah OR author:bar)
# We want : (title:foo ? blah) OR (author:bar)
node.op = parent_op.op
node.children += (*parent_op.children[1:],)
to_rem.append(child)
else:
last_sf = None
if len(to_rem) == len(node.children) - 1:
# We only have the searchfield left!
if parents:
# Move the head to the next element
last_sf.head = node.head
parents[-1].children = tuple(
child if child is not node else last_sf
for child in parents[-1].children
)
else:
tree = last_sf
break
else:
node.children = tuple(
child for child in node.children if child not in to_rem
)
# Remove spaces before field names
for node, parents in luqum_traverse(tree):
if isinstance(node, SearchField):
node.expr.head = ''
return tree
def query_dict_to_str(
escaped: dict | None = None,
unescaped: dict | None = None,
op: Literal['AND', 'OR', ''] = '',
phrase: bool = False,
) -> str:
"""
Converts a query dict to a search query.
>>> query_dict_to_str({'title': 'foo'})
'title:(foo)'
>>> query_dict_to_str({'title': 'foo bar', 'author': 'bar'})
'title:(foo bar) author:(bar)'
>>> query_dict_to_str({'title': 'foo bar', 'author': 'bar'}, op='OR')
'title:(foo bar) OR author:(bar)'
>>> query_dict_to_str({'title': 'foo ? to escape'})
'title:(foo \\\\? to escape)'
>>> query_dict_to_str({'title': 'YES AND'})
'title:(YES and)'
>>> query_dict_to_str({'publisher_facet': 'Running Press'}, phrase=True)
'publisher_facet:"Running Press"'
"""
result = ''
if escaped:
result += f' {op} '.join(
(
f'{k}:"{fully_escape_query(v)}"'
if phrase
else f'{k}:({fully_escape_query(v)})'
)
for k, v in escaped.items()
)
if unescaped:
if result:
result += f' {op} '
result += f' {op} '.join(f'{k}:{v}' for k, v in unescaped.items())
return result
def luqum_replace_field(query: Item, replacer: Callable[[str], str]) -> None:
"""
In-place replaces portions of a field, as indicated by the replacement function.
:param query: Passed in the form of a luqum tree
:param replacer: function called on each query.
"""
for sf, _ in luqum_traverse(query):
if isinstance(sf, SearchField):
sf.name = replacer(sf.name)
def luqum_remove_field(query: Item, predicate: Callable[[str], bool]) -> None:
"""
In-place removes fields from a query, as indicated by the predicate function.
:param query: Passed in the form of a luqum tree
:param predicate: function called on each query.
"""
for sf, parents in luqum_traverse(query):
if isinstance(sf, SearchField) and predicate(sf.name):
luqum_remove_child(sf, parents)
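# Editorial sketch (not part of the original module): a typical rewrite pipeline
# built from the helpers above: parse a query with Open Library's greedy field
# binding, drop one field, and rename another. The field names are hypothetical.
def _demo_query_rewrite() -> str:
    tree = luqum_parser('title:foo bar author_key:OL1A')
    luqum_remove_field(tree, lambda name: name == 'author_key')
    luqum_replace_field(tree, lambda name: 'alternative_title' if name == 'title' else name)
    return str(tree)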
| 10,787 | Python | .py | 256 | 33.011719 | 91 | 0.584692 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 202 | utils.py | internetarchive_openlibrary/openlibrary/solr/utils.py |
from dataclasses import dataclass, field
import json
import logging
import httpx
from httpx import HTTPError, HTTPStatusError, TimeoutException
from openlibrary import config
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.utils.retry import MaxRetriesExceeded, RetryStrategy
logger = logging.getLogger("openlibrary.solr")
solr_base_url = None
solr_next: bool | None = None
def load_config(c_config='conf/openlibrary.yml'):
if not config.runtime_config:
config.load(c_config)
config.load_config(c_config)
def get_solr_base_url():
"""
Get Solr host
:rtype: str
"""
global solr_base_url
load_config()
if not solr_base_url:
solr_base_url = config.runtime_config['plugin_worksearch']['solr_base_url']
return solr_base_url
def set_solr_base_url(solr_url: str):
global solr_base_url
solr_base_url = solr_url
def get_solr_next() -> bool:
"""
Get whether this is the next version of solr; ie new schema configs/fields, etc.
"""
global solr_next
if solr_next is None:
load_config()
solr_next = config.runtime_config['plugin_worksearch'].get('solr_next', False)
return solr_next
def set_solr_next(val: bool):
global solr_next
solr_next = val
@dataclass
class SolrUpdateRequest:
adds: list[SolrDocument] = field(default_factory=list)
"""Records to be added/modified"""
deletes: list[str] = field(default_factory=list)
"""Records to be deleted"""
commit: bool = False
# Override the + operator
def __add__(self, other):
if isinstance(other, SolrUpdateRequest):
return SolrUpdateRequest(
adds=self.adds + other.adds,
deletes=self.deletes + other.deletes,
commit=self.commit or other.commit,
)
else:
raise TypeError(f"Cannot add {type(self)} and {type(other)}")
def has_changes(self) -> bool:
return bool(self.adds or self.deletes)
def to_solr_requests_json(self, indent: int | str | None = None, sep=',') -> str:
result = '{'
if self.deletes:
result += f'"delete": {json.dumps(self.deletes, indent=indent)}' + sep
for doc in self.adds:
result += f'"add": {json.dumps({"doc": doc}, indent=indent)}' + sep
if self.commit:
result += '"commit": {}' + sep
if result.endswith(sep):
result = result[: -len(sep)]
result += '}'
return result
def clear_requests(self) -> None:
self.adds.clear()
self.deletes.clear()
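# Editorial sketch (not in the original module): building a small update request
# and inspecting the JSON body that would be POSTed to Solr. The document below is
# hypothetical and far from a complete SolrDocument.
def _demo_update_request() -> str:
    request = SolrUpdateRequest(
        adds=[{'key': '/works/OL1W', 'type': 'work'}],  # a plain dict stands in for a SolrDocument
        deletes=['/works/OL2W'],
        commit=True,
    )
    # Yields one JSON object with "delete", "add" and "commit" members.
    return request.to_solr_requests_json(indent=2)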
def solr_update(
update_request: SolrUpdateRequest,
skip_id_check=False,
solr_base_url: str | None = None,
) -> None:
content = update_request.to_solr_requests_json()
solr_base_url = solr_base_url or get_solr_base_url()
params = {
# Don't fail the whole batch if one bad apple
'update.chain': 'tolerant-chain'
}
if skip_id_check:
params['overwrite'] = 'false'
def make_request():
logger.debug(f"POSTing update to {solr_base_url}/update {params}")
try:
resp = httpx.post(
f'{solr_base_url}/update',
# Large batches especially can take a decent chunk of time
timeout=300,
params=params,
headers={'Content-Type': 'application/json'},
content=content,
)
if resp.status_code == 400:
resp_json = resp.json()
indiv_errors = resp_json.get('responseHeader', {}).get('errors', [])
if indiv_errors:
for e in indiv_errors:
logger.error(f'Individual Solr POST Error: {e}')
global_error = resp_json.get('error')
if global_error:
logger.error(f'Global Solr POST Error: {global_error.get("msg")}')
if not (indiv_errors or global_error):
# We can handle the above errors. Any other 400 status codes
# are fatal and should cause a retry
resp.raise_for_status()
else:
resp.raise_for_status()
except HTTPStatusError as e:
logger.error(f'HTTP Status Solr POST Error: {e}')
raise
except TimeoutException:
logger.error(f'Timeout Solr POST Error: {content}')
raise
except HTTPError as e:
logger.error(f'HTTP Solr POST Error: {e}')
raise
retry = RetryStrategy(
[HTTPStatusError, TimeoutException, HTTPError],
max_retries=5,
delay=8,
)
try:
return retry(make_request)
except MaxRetriesExceeded as e:
logger.error(f'Max retries exceeded for Solr POST: {e.last_exception}')
async def solr_insert_documents(
documents: list[dict],
solr_base_url: str | None = None,
skip_id_check=False,
):
"""
Note: This has only been tested with Solr 8, but might work with Solr 3 as well.
"""
solr_base_url = solr_base_url or get_solr_base_url()
params = {}
if skip_id_check:
params['overwrite'] = 'false'
logger.debug(f"POSTing update to {solr_base_url}/update {params}")
async with httpx.AsyncClient() as client:
resp = await client.post(
f'{solr_base_url}/update',
timeout=30, # seconds; the default timeout is silly short
params=params,
headers={'Content-Type': 'application/json'},
content=json.dumps(documents),
)
resp.raise_for_status()
def str_to_key(s):
"""
Convert a string to a valid Solr field name.
TODO: this exists in openlibrary/utils/__init__.py str_to_key(), DRY
:param str s:
:rtype: str
"""
to_drop = set(''';/?:@&=+$,<>#%"{}|\\^[]`\n\r''')
return ''.join(c if c != ' ' else '_' for c in s.lower() if c not in to_drop)
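# Editorial sketch (not part of the original module): str_to_key() lowercases,
# drops the punctuation listed in to_drop, and maps spaces to underscores.
def _demo_str_to_key() -> None:
    assert str_to_key('Foo, Bar?') == 'foo_bar'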
| 6,011 | Python | .py | 162 | 28.697531 | 86 | 0.601723 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 203 | types_generator.py | internetarchive_openlibrary/openlibrary/solr/types_generator.py |
#!/usr/bin/env python
import os
root = os.path.dirname(__file__)
OVERRIDES = {
'type': "Literal['work', 'author', 'subject']",
'public_scan_b': 'Optional[bool]',
'printdisabled_s': 'Optional[str]',
'lending_edition_s': 'Optional[str]',
'ia_collection_s': 'Optional[str]',
'ebook_count_i': 'Optional[int]',
}
def generate():
"""This function generates the types.py file."""
import xml.etree.ElementTree as ET
# read the managed-schema xml file
solr_schema = ET.parse(
os.path.join(root, '../../conf/solr/conf/managed-schema.xml')
)
python_fields: list[str] = []
seen_names: set[str] = set()
for field in solr_schema.getroot().findall('field'):
name = field.get('name')
if name.startswith('_'):
continue
required = field.get('required') == 'true'
typ = field.get('type')
multivalued = field.get('multiValued') == 'true'
type_map = {
'pint': 'int',
'string': 'str',
'text_en_splitting': 'str',
'text_general': 'str',
'text_international': 'str',
'text_title_sort': 'str',
'boolean': 'bool',
'pfloat': 'float',
}
if name in OVERRIDES:
python_type = OVERRIDES[name]
elif typ in type_map:
python_type = type_map[typ]
elif (
field_type := solr_schema.find(f".//fieldType[@name='{typ}']")
) is not None:
field_class = field_type.get('class')
if field_class == 'solr.EnumFieldType':
enumsConfigFile = field_type.get('enumsConfig')
enumsConfig = ET.parse(
os.path.join(root, '../../conf/solr/conf/', enumsConfigFile)
)
enum_values = [
el.text
for el in enumsConfig.findall(
f".//enum[@name='{field_type.get('enumName')}']/value"
)
]
python_type = f"Literal[{', '.join(map(repr, enum_values))}]"
else:
raise Exception(f"Unknown field type class {field_class}")
else:
raise Exception(f"Unknown field type {typ}")
if name not in OVERRIDES:
if multivalued:
python_type = f"list[{python_type}]"
if not required:
python_type = f"Optional[{python_type}]"
seen_names.add(name)
python_fields.append(f" {name}: {python_type}")
for key in OVERRIDES:
if key not in seen_names:
python_fields.append(f" {key}: {OVERRIDES[key]}")
body = '\n'.join(python_fields)
python = f"""# This file is auto-generated by types_generator.py
# fmt: off
from typing import Literal, TypedDict, Optional
class SolrDocument(TypedDict):
{body}
# fmt: on"""
return python
if __name__ == '__main__':
print(generate())
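# Editorial note (not in the original file): solr_types.py declares that it is
# generated by this script. A plausible regeneration step, assuming the command is
# run from the repository root, is to redirect the printed output over that file:
#
#   python openlibrary/solr/types_generator.py > openlibrary/solr/solr_types.py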
| 2,981 | Python | .py | 81 | 27.111111 | 80 | 0.539528 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 204 | update.py | internetarchive_openlibrary/openlibrary/solr/update.py |
import functools
import logging
from pathlib import Path
from typing import Literal, cast
import aiofiles
import json
import web
from openlibrary.catalog.utils.query import set_query_host
from openlibrary.solr.data_provider import (
get_data_provider,
DataProvider,
ExternalDataProvider,
)
from openlibrary.solr.updater.abstract import AbstractSolrUpdater
from openlibrary.solr.updater.author import AuthorSolrUpdater
from openlibrary.solr.updater.edition import EditionSolrUpdater
from openlibrary.solr.updater.list import ListSolrUpdater
from openlibrary.solr.updater.work import WorkSolrUpdater
from openlibrary.solr.utils import (
SolrUpdateRequest,
load_config,
set_solr_base_url,
set_solr_next,
solr_update,
)
from openlibrary.utils import uniq
from openlibrary.utils.open_syllabus_project import set_osp_dump_location
logger = logging.getLogger("openlibrary.solr")
# This will be set to a data provider; have faith, mypy!
data_provider = cast(DataProvider, None)
@functools.cache
def get_solr_updaters() -> list[AbstractSolrUpdater]:
global data_provider
assert data_provider is not None
return [
# ORDER MATTERS
EditionSolrUpdater(data_provider),
WorkSolrUpdater(data_provider),
AuthorSolrUpdater(data_provider),
ListSolrUpdater(data_provider),
]
def can_update_key(key: str) -> bool:
return any(updater.key_test(key) for updater in get_solr_updaters())
async def update_keys(
keys: list[str],
commit=True,
output_file=None,
skip_id_check=False,
update: Literal['update', 'print', 'pprint', 'quiet'] = 'update',
) -> SolrUpdateRequest:
"""
Insert/update the documents with the provided keys in Solr.
:param list[str] keys: Keys to update (ex: ["/books/OL1M"]).
:param bool commit: Create <commit> tags to make Solr persist the changes (and make the public/searchable).
:param str output_file: If specified, will save all update actions to output_file **instead** of sending to Solr.
Each line will be JSON object.
FIXME Updates to editions/subjects ignore output_file and will be sent (only) to Solr regardless.
"""
logger.debug("BEGIN update_keys")
def _solr_update(update_state: SolrUpdateRequest):
if update == 'update':
return solr_update(update_state, skip_id_check)
elif update == 'pprint':
print(update_state.to_solr_requests_json(sep='\n', indent=4))
elif update == 'print':
print(update_state.to_solr_requests_json(sep='\n'))
elif update == 'quiet':
pass
global data_provider
if data_provider is None:
data_provider = get_data_provider('default')
net_update = SolrUpdateRequest(commit=commit)
for updater in get_solr_updaters():
update_state = SolrUpdateRequest(commit=commit)
updater_keys = uniq(k for k in keys if updater.key_test(k))
await updater.preload_keys(updater_keys)
for key in updater_keys:
logger.debug(f"processing {key}")
try:
thing = await data_provider.get_document(key)
if thing and thing['type']['key'] == '/type/redirect':
logger.warning("Found redirect to %r", thing['location'])
# When the given key is not found or redirects to another thing,
# explicitly delete the key. It won't get deleted otherwise.
update_state.deletes.append(thing['key'])
thing = await data_provider.get_document(thing['location'])
if not thing:
logger.warning("No thing found for key %r. Ignoring...", key)
continue
if thing['type']['key'] == '/type/delete':
logger.info(
"%r has type %r. queuing for deleting it solr.",
thing['key'],
thing['type']['key'],
)
update_state.deletes.append(thing['key'])
else:
new_update_state, new_keys = await updater.update_key(thing)
update_state += new_update_state
keys += new_keys
except: # noqa: E722
logger.error("Failed to update %r", key, exc_info=True)
if update_state.has_changes():
if output_file:
async with aiofiles.open(output_file, "w") as f:
for doc in update_state.adds:
await f.write(f"{json.dumps(doc)}\n")
else:
_solr_update(update_state)
net_update += update_state
logger.debug("END update_keys")
return net_update
async def do_updates(keys):
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
await update_keys(keys, commit=False)
def load_configs(
c_host: str,
c_config: str,
c_data_provider: (
DataProvider | Literal["default", "legacy", "external"]
) = 'default',
) -> DataProvider:
host = web.lstrips(c_host, "http://").strip("/")
set_query_host(host)
load_config(c_config)
global data_provider
if data_provider is None:
if isinstance(c_data_provider, DataProvider):
data_provider = c_data_provider
elif c_data_provider == 'external':
data_provider = ExternalDataProvider(host)
else:
data_provider = get_data_provider(c_data_provider)
return data_provider
async def main(
keys: list[str],
osp_dump: Path | None = None,
ol_url="http://openlibrary.org",
ol_config="openlibrary.yml",
output_file: str | None = None,
commit=True,
data_provider: Literal['default', 'legacy', 'external'] = "default",
solr_base: str | None = None,
solr_next=False,
update: Literal['update', 'print', 'pprint'] = 'update',
):
"""
Insert the documents with the given keys into Solr.
:param keys: The keys of the items to update (ex: /books/OL1M)
:param ol_url: URL of the openlibrary website
:param ol_config: Open Library config file
:param output_file: Where to save output
:param commit: Whether to also trigger a Solr commit
:param data_provider: Name of the data provider to use
:param solr_base: If wanting to override openlibrary.yml
:param solr_next: Whether to assume schema of next solr version is active
:param update: Whether/how to do the actual solr update call
"""
load_configs(ol_url, ol_config, data_provider)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
if keys[0].startswith('//'):
keys = [k[1:] for k in keys]
if solr_base:
set_solr_base_url(solr_base)
set_solr_next(solr_next)
set_osp_dump_location(osp_dump)
await update_keys(keys, commit=commit, output_file=output_file, update=update)
if __name__ == '__main__':
from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI
FnToCLI(main).run()
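# Editorial sketch (not part of the original file): a minimal programmatic run,
# assuming a reachable Open Library host and a local config file. The host, config
# path and work key are hypothetical; update='print' writes the Solr requests to
# stdout instead of POSTing them. Run with asyncio.run(_demo_print_updates()).
async def _demo_print_updates() -> None:
    load_configs('http://localhost:8080', 'conf/openlibrary.yml', 'external')
    await update_keys(['/works/OL45883W'], commit=False, update='print')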
| 7,160 | Python | .py | 174 | 33.241379 | 117 | 0.645941 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 205 | data_provider.py | internetarchive_openlibrary/openlibrary/solr/data_provider.py |
"""Module to provide data for solr indexer.
This module has all the logic for querying different sources for getting the
data required for solr.
Multiple data providers are supported, each is good for different use case.
"""
import asyncio
import itertools
import logging
import re
from typing import Optional, TypedDict, cast
from collections.abc import Iterable, Sized
import httpx
from httpx import HTTPError
import requests
import web
from web import DB
from infogami.infobase.client import Site
from openlibrary.core import ia
from openlibrary.core.bookshelves import Bookshelves
from openlibrary.core.ratings import Ratings, WorkRatingsSummary
from openlibrary.utils import extract_numeric_id_from_olid
logger = logging.getLogger("openlibrary.solr.data_provider")
IA_METADATA_FIELDS = ('identifier', 'boxid', 'collection', 'access-restricted-item')
OCAID_PATTERN = re.compile(r'^[^\s&#?/]+$')
def get_data_provider(type="default"):
"""Returns the data provider of given type."""
if type == "default":
return BetterDataProvider()
elif type == "legacy":
return LegacyDataProvider()
else:
raise ValueError("unknown data provider type: %s" % type)
def is_valid_ocaid(ocaid: str):
return bool(OCAID_PATTERN.match(ocaid))
def batch(items: list, max_batch_len: int):
"""
>>> list(batch([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(batch([], 2))
[]
>>> list(batch([1,2,3,4,5], 3))
[[1, 2, 3], [4, 5]]
>>> list(batch([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
>>> list(batch([1,2,3,4,5], 6))
[[1, 2, 3, 4, 5]]
"""
start = 0
while start < len(items):
yield items[start : start + max_batch_len]
start += max_batch_len
def batch_until_len(items: Iterable[Sized], max_batch_len: int):
batch_len = 0
batch: list[Sized] = []
for item in items:
if batch_len + len(item) > max_batch_len and batch:
yield batch
batch = [item]
batch_len = len(item)
else:
batch.append(item)
batch_len += len(item)
if batch:
yield batch
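# Editorial sketch (not in the original module): batch_until_len() groups sized
# items so that each batch's combined length stays within max_batch_len.
def _demo_batch_until_len() -> None:
    assert list(batch_until_len(['aa', 'bbb', 'c'], 4)) == [['aa'], ['bbb', 'c']]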
def partition(lst: list, parts: int):
"""
>>> list(partition([1,2,3,4,5,6], 1))
[[1, 2, 3, 4, 5, 6]]
>>> list(partition([1,2,3,4,5,6], 2))
[[1, 2, 3], [4, 5, 6]]
>>> list(partition([1,2,3,4,5,6], 3))
[[1, 2], [3, 4], [5, 6]]
>>> list(partition([1,2,3,4,5,6], 4))
[[1], [2], [3], [4, 5, 6]]
>>> list(partition([1,2,3,4,5,6], 5))
[[1], [2], [3], [4], [5, 6]]
>>> list(partition([1,2,3,4,5,6], 6))
[[1], [2], [3], [4], [5], [6]]
>>> list(partition([1,2,3,4,5,6], 7))
[[1], [2], [3], [4], [5], [6]]
>>> list(partition([1,2,3,4,5,6,7], 3))
[[1, 2], [3, 4], [5, 6, 7]]
>>> list(partition([], 5))
[]
"""
if not lst:
return
total_len = len(lst)
parts = min(total_len, parts)
size = total_len // parts
for i in range(parts):
start = i * size
end = total_len if (i == parts - 1) else ((i + 1) * size)
yield lst[start:end]
class WorkReadingLogSolrSummary(TypedDict):
readinglog_count: int
want_to_read_count: int
currently_reading_count: int
already_read_count: int
class DataProvider:
"""
DataProvider is the interface for solr updater
to get additional information for building solr index.
This is an abstract class and multiple implementations are provided
in this module.
"""
def __init__(self) -> None:
self.ia_cache: dict[str, dict | None] = {}
@staticmethod
async def _get_lite_metadata(ocaids: list[str], _recur_depth=0, _max_recur_depth=3):
"""
For bulk fetch, some of the ocaids in Open Library may be bad
and break archive.org ES fetches. When this happens, we (up to
3 times) recursively split up the pool of ocaids to do as many
successful sub-bulk fetches as we can and then when limit is
reached, downstream code will fetch remaining ocaids individually
(and skip bad ocaids)
"""
if not ocaids or _recur_depth > _max_recur_depth:
logger.warning(
'Max recursion exceeded trying fetch IA data', extra={'ocaids': ocaids}
)
return []
try:
async with httpx.AsyncClient() as client:
r = await client.get(
"https://archive.org/advancedsearch.php",
timeout=30, # The default is silly short
headers={
'x-application-id': 'ol-solr',
},
params={
'q': f"identifier:({' OR '.join(ocaids)})",
'rows': len(ocaids),
'fl': ','.join(IA_METADATA_FIELDS),
'page': 1,
'output': 'json',
'save': 'yes',
'service': 'metadata__unlimited',
},
)
r.raise_for_status()
return r.json()['response']['docs']
except HTTPError:
logger.warning("IA bulk query failed")
except (ValueError, KeyError):
logger.warning(f"IA bulk query failed {r.status_code}: {r.json()['error']}")
# Only here if an exception occurred
# there's probably a bad apple; try splitting the batch
parts = await asyncio.gather(
*(
DataProvider._get_lite_metadata(part, _recur_depth=_recur_depth + 1)
for part in partition(ocaids, 6)
)
)
return list(itertools.chain(*parts))
@staticmethod
async def _get_lite_metadata_direct(ocaid: str):
try:
async with httpx.AsyncClient() as client:
r = await client.get(
f"https://archive.org/metadata/{ocaid}/metadata",
timeout=30, # The default is silly short
)
r.raise_for_status()
response = r.json()
if 'error' not in response:
lite_metadata = {
key: response['result'][key]
for key in IA_METADATA_FIELDS
if key in response['result']
}
return lite_metadata
else:
return {
'error': response['error'],
'identifier': ocaid,
}
except HTTPError:
logger.warning(f'Error fetching metadata for {ocaid}')
return None
async def get_document(self, key):
"""Returns the document with specified key from the database.
:param str key: type-prefixed key (ex: /books/OL1M)
:rtype: dict
"""
raise NotImplementedError()
def get_metadata(self, identifier: str):
if identifier in self.ia_cache:
logger.debug("IA metadata cache hit")
return self.ia_cache[identifier]
elif not is_valid_ocaid(identifier):
return None
else:
logger.debug("IA metadata cache miss")
return ia.get_metadata_direct(identifier)
async def preload_documents(self, keys: Iterable[str]):
"""
Preload a set of documents in a single request. Should make subsequent calls to
get_document faster.
"""
pass
async def preload_metadata(self, ocaids: list[str]):
invalid_ocaids = {ocaid for ocaid in ocaids if not is_valid_ocaid(ocaid)}
if invalid_ocaids:
logger.warning(f"Trying to cache invalid OCAIDs: {invalid_ocaids}")
valid_ocaids = list(set(ocaids) - invalid_ocaids)
batches = list(batch_until_len(valid_ocaids, 3000))
# Start them all async
tasks = [asyncio.create_task(self._get_lite_metadata(b)) for b in batches]
for task in tasks:
for doc in await task:
self.ia_cache[doc['identifier']] = doc
missing_ocaids = [ocaid for ocaid in valid_ocaids if ocaid not in self.ia_cache]
missing_ocaid_batches = list(batch(missing_ocaids, 6))
for missing_batch in missing_ocaid_batches:
# Start them all async
tasks = [
asyncio.create_task(self._get_lite_metadata_direct(ocaid))
for ocaid in missing_batch
]
for task in tasks:
lite_metadata = await task
if lite_metadata:
self.ia_cache[lite_metadata['identifier']] = lite_metadata
def preload_editions_of_works(self, work_keys: Iterable[str]):
"""
Preload the editions of the provided works. Should make subsequent calls to
get_editions_of_work faster.
:param list of str work_keys: type-prefixed keys to work keys (ex: /works/OL1W)
:return: None
"""
pass
def find_redirects(self, key):
"""
Returns keys of all things which redirect to this one.
:param str key: type-prefixed key
:rtype: list of str
"""
raise NotImplementedError()
def get_editions_of_work(self, work):
"""
:param dict work: work object
:rtype: list of dict
"""
raise NotImplementedError()
def get_work_ratings(self, work_key: str) -> WorkRatingsSummary | None:
raise NotImplementedError()
def get_work_reading_log(self, work_key: str) -> WorkReadingLogSolrSummary | None:
raise NotImplementedError()
def clear_cache(self):
self.ia_cache.clear()
class LegacyDataProvider(DataProvider):
def __init__(self):
from openlibrary.catalog.utils.query import query_iter, withKey
super().__init__()
self._query_iter = query_iter
self._withKey = withKey
def find_redirects(self, key):
"""Returns keys of all things which are redirected to this one."""
logger.info("find_redirects %s", key)
q = {'type': '/type/redirect', 'location': key}
return [r['key'] for r in self._query_iter(q)]
def get_editions_of_work(self, work):
logger.info("find_editions_of_work %s", work['key'])
q = {'type': '/type/edition', 'works': work['key'], '*': None}
return list(self._query_iter(q))
async def get_document(self, key):
logger.info("get_document %s", key)
return self._withKey(key)
def get_work_ratings(self, work_key: str) -> WorkRatingsSummary | None:
work_id = int(work_key[len('/works/OL') : -len('W')])
return Ratings.get_work_ratings_summary(work_id)
def get_work_reading_log(self, work_key: str) -> WorkReadingLogSolrSummary:
work_id = extract_numeric_id_from_olid(work_key)
counts = Bookshelves.get_work_summary(work_id)
return cast(
WorkReadingLogSolrSummary,
{
'readinglog_count': sum(counts.values()),
**{f'{shelf}_count': count for shelf, count in counts.items()},
},
)
def clear_cache(self):
# Nothing's cached, so nothing to clear!
return
class ExternalDataProvider(DataProvider):
"""
Only used for local env, this data provider fetches data using public OL apis
"""
def __init__(self, ol_host: str):
super().__init__()
self.ol_host = ol_host
def find_redirects(self, key: str):
# NOT IMPLEMENTED
return []
def get_editions_of_work(self, work):
resp = requests.get(
f"http://{self.ol_host}{work['key']}/editions.json", params={'limit': 500}
).json()
if 'next' in resp['links']:
logger.warning(f"Too many editions for {work['key']}")
return resp['entries']
async def get_document(self, key: str):
async with httpx.AsyncClient() as client:
response = await client.get(f"http://{self.ol_host}{key}.json")
return response.json()
class BetterDataProvider(LegacyDataProvider):
def __init__(
self,
site: Site | None = None,
db: DB | None = None,
):
"""Test with
import web; import infogami
from openlibrary.config import load_config
load_config('/openlibrary/config/openlibrary.yml')
infogami._setup()
from infogami import config
"""
super().__init__()
# cache for documents
self.cache: dict[str, dict] = {}
# cache for redirects
self.redirect_cache: dict[str, list[str]] = {}
self.edition_keys_of_works_cache: dict[str, list[str]] = {}
import infogami
from infogami.utils import delegate
# web.ctx might not be defined at this time -_-
self.get_site = lambda: site or web.ctx.site
if not db:
infogami._setup()
delegate.fakeload()
from openlibrary.core.db import get_db
self.db: DB = get_db()
else:
self.db = db
async def get_document(self, key):
# logger.info("get_document %s", key)
if key not in self.cache:
await self.preload_documents([key])
if key not in self.cache:
logger.warning("NOT FOUND %s", key)
return self.cache.get(key) or {"key": key, "type": {"key": "/type/delete"}}
async def preload_documents(self, keys: Iterable[str]):
keys2 = set(keys)
# keys2.update(k for k in self.ia_redirect_cache.values() if k is not None)
self.preload_documents0(keys2)
self._preload_works()
self._preload_authors()
self._preload_editions()
await self._preload_metadata_of_editions()
# for all works and authors, find redirects as they'll requested later
keys3 = [k for k in self.cache if k.startswith(("/works/", "/authors/"))]
self.preload_redirects(keys3)
def preload_documents0(self, keys):
keys = [k for k in keys if k not in self.cache]
if not keys:
return
logger.info("preload_documents0 %s", keys)
for chunk in web.group(keys, 100):
docs = self.get_site().get_many(list(chunk))
for doc in docs:
self.cache[doc['key']] = doc.dict()
def _preload_works(self):
"""Preloads works for all editions in the cache."""
keys = []
for doc in self.cache.values():
if doc and doc['type']['key'] == '/type/edition' and doc.get('works'):
keys.append(doc['works'][0]['key'])
# print "preload_works, found keys", keys
self.preload_documents0(keys)
def _preload_editions(self):
keys = []
for doc in self.cache.values():
if doc and doc['type']['key'] == '/type/work':
keys.append(doc['key'])
self.preload_editions_of_works(keys)
async def _preload_metadata_of_editions(self):
identifiers = []
for doc in self.cache.values():
if doc and doc['type']['key'] == '/type/edition' and doc.get('ocaid'):
identifiers.append(doc['ocaid'])
# source_records = doc.get("source_records", [])
# identifiers.extend(r[len("ia:"):] for r in source_records if r.startswith("ia:"))
await self.preload_metadata(identifiers)
def _preload_authors(self):
"""Preloads authors for all works in the cache."""
keys = []
for doc in self.cache.values():
if doc and doc['type']['key'] == '/type/work' and doc.get('authors'):
keys.extend(a['author']['key'] for a in doc['authors'])
if doc and doc['type']['key'] == '/type/edition' and doc.get('authors'):
keys.extend(a['key'] for a in doc['authors'])
self.preload_documents0(list(set(keys)))
def find_redirects(self, key):
"""Returns all the keys that are redirected to this."""
self.preload_redirects([key])
return self.redirect_cache[key]
def preload_redirects(self, keys):
keys = [k for k in keys if k not in self.redirect_cache]
if not keys:
return
logger.info("preload_redirects %s", keys)
for chunk in web.group(keys, 100):
self._preload_redirects0(list(chunk))
def _preload_redirects0(self, keys):
query = {
"type": "/type/redirect",
"location": keys,
"a:location": None, # asking it to fill location in results
}
for k in keys:
self.redirect_cache.setdefault(k, [])
matches = self.get_site().things(query, details=True)
for thing in matches:
# we are trying to find documents that are redirecting to each of the given keys
self.redirect_cache[thing.location].append(thing.key)
def get_editions_of_work(self, work):
wkey = work['key']
self.preload_editions_of_works([wkey])
edition_keys = self.edition_keys_of_works_cache.get(wkey, [])
return [self.cache[k] for k in edition_keys]
def preload_editions_of_works(self, work_keys: Iterable[str]):
work_keys = [
wkey for wkey in work_keys if wkey not in self.edition_keys_of_works_cache
]
if not work_keys:
return
logger.info("preload_editions_of_works %s ..", work_keys[:5])
        # Infobase doesn't have a way to find editions of multiple works at once.
# Using raw SQL to avoid making individual infobase queries, which is very
# time consuming.
key_query = (
"select id from property where name='works'"
" and type=(select id from thing where key='/type/edition')"
)
q = (
"SELECT edition.key as edition_key, work.key as work_key"
" FROM thing as edition, thing as work, edition_ref"
" WHERE edition_ref.thing_id=edition.id"
" AND edition_ref.value=work.id"
f" AND edition_ref.key_id=({key_query})"
" AND work.key in $keys"
)
result = self.db.query(q, vars={"keys": work_keys})
for row in result:
self.edition_keys_of_works_cache.setdefault(row.work_key, []).append(
row.edition_key
)
keys = [k for _keys in self.edition_keys_of_works_cache.values() for k in _keys]
self.preload_documents0(keys)
return
def clear_cache(self):
super().clear_cache()
self.cache.clear()
self.redirect_cache.clear()
self.edition_keys_of_works_cache.clear()
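# Editorial sketch (not part of the original module): fetching a document through
# the public-API provider intended for local development. Host and work key are
# hypothetical; run with asyncio.run(_demo_external_provider()).
async def _demo_external_provider() -> None:
    provider = ExternalDataProvider('openlibrary.org')
    work = await provider.get_document('/works/OL45883W')
    print(work.get('title'))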
| 18,714 | Python | .py | 458 | 31.360262 | 99 | 0.580283 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 206 | author.py | internetarchive_openlibrary/openlibrary/solr/updater/author.py |
from typing import cast
import typing
import httpx
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.updater.abstract import AbstractSolrBuilder, AbstractSolrUpdater
from openlibrary.solr.utils import SolrUpdateRequest, get_solr_base_url
from openlibrary.solr.data_provider import WorkReadingLogSolrSummary
from openlibrary.core.ratings import WorkRatingsSummary, Ratings
SUBJECT_FACETS = ['subject_facet', 'time_facet', 'person_facet', 'place_facet']
class AuthorSolrUpdater(AbstractSolrUpdater):
key_prefix = '/authors/'
thing_type = '/type/author'
async def update_key(self, author: dict) -> tuple[SolrUpdateRequest, list[str]]:
author_id = author['key'].split("/")[-1]
base_url = get_solr_base_url() + '/query'
json: dict[str, typing.Any] = {
"params": {
"json.nl": "arrarr",
"q": "author_key:%s " % author_id,
"fq": "type:work",
"fl": "title, subtitle",
"sort": "edition_count desc",
},
'facet': {
"ratings_count_1": "sum(ratings_count_1)",
"ratings_count_2": "sum(ratings_count_2)",
"ratings_count_3": "sum(ratings_count_3)",
"ratings_count_4": "sum(ratings_count_4)",
"ratings_count_5": "sum(ratings_count_5)",
"readinglog_count": "sum(readinglog_count)",
"want_to_read_count": "sum(want_to_read_count)",
"currently_reading_count": "sum(currently_reading_count)",
"already_read_count": "sum(already_read_count)",
},
}
for field in SUBJECT_FACETS:
json["facet"][field] = {
"type": "terms",
"field": field,
}
async with httpx.AsyncClient() as client:
response = await client.post(
base_url,
timeout=30,
json=json,
)
reply = response.json()
doc = AuthorSolrBuilder(author, reply).build()
return SolrUpdateRequest(adds=[doc]), []
class AuthorSolrBuilder(AbstractSolrBuilder):
def __init__(self, author: dict, solr_reply: dict):
self._author = author
self._solr_reply = solr_reply
@property
def key(self) -> str:
return self._author['key']
@property
def type(self) -> str:
return 'author'
@property
def name(self) -> str | None:
return self._author.get('name')
@property
def alternate_names(self) -> list[str]:
return self._author.get('alternate_names', [])
@property
def birth_date(self) -> str | None:
return self._author.get('birth_date')
@property
def death_date(self) -> str | None:
return self._author.get('death_date')
@property
def date(self) -> str | None:
"""I think this is legacy?"""
return self._author.get('date')
@property
def top_work(self) -> str | None:
docs = self._solr_reply['response'].get('docs', [])
if docs and docs[0].get('title', None):
top_work = docs[0]['title']
if docs[0].get('subtitle', None):
top_work += ': ' + docs[0]['subtitle']
return top_work
return None
@property
def work_count(self) -> int:
return self._solr_reply['response']['numFound']
@property
def top_subjects(self) -> list[str]:
all_subjects = []
for field in SUBJECT_FACETS:
if facet := self._solr_reply['facets'].get(field):
for bucket in facet['buckets']:
all_subjects.append((bucket["count"], bucket["val"]))
all_subjects.sort(reverse=True)
return [top_facets for num, top_facets in all_subjects[:10]]
def build(self) -> SolrDocument:
doc = cast(dict, super().build())
doc |= self.build_ratings()
doc |= self.build_reading_log()
return cast(SolrDocument, doc)
def build_ratings(self) -> WorkRatingsSummary:
return Ratings.work_ratings_summary_from_counts(
[
self._solr_reply["facets"].get(f"ratings_count_{index}", 0)
for index in range(1, 6)
]
)
def build_reading_log(self) -> WorkReadingLogSolrSummary:
reading_log = {
"want_to_read_count": self._solr_reply["facets"].get(
"want_to_read_count", 0.0
),
"already_read_count": self._solr_reply["facets"].get(
"already_read_count", 0.0
),
"currently_reading_count": self._solr_reply["facets"].get(
"currently_reading_count", 0.0
),
"readinglog_count": self._solr_reply["facets"].get("readinglog_count", 0.0),
}
return cast(WorkReadingLogSolrSummary, reading_log)
| 4,951 | Python | .py | 122 | 30.47541 | 88 | 0.569288 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 207 | edition.py | internetarchive_openlibrary/openlibrary/solr/updater/edition.py |
from functools import cached_property
import logging
import re
from typing import TYPE_CHECKING, cast
import requests
import openlibrary.book_providers as bp
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.updater.abstract import AbstractSolrBuilder, AbstractSolrUpdater
from openlibrary.solr.utils import SolrUpdateRequest, get_solr_base_url
from openlibrary.utils import uniq
from openlibrary.utils.isbn import opposite_isbn
if TYPE_CHECKING:
from openlibrary.solr.updater.work import WorkSolrBuilder
logger = logging.getLogger("openlibrary.solr")
re_edition_key_basename = re.compile("^[a-zA-Z0-9:.-]+$")
re_lang_key = re.compile(r'^/(?:l|languages)/([a-z]{3})$')
re_year = re.compile(r'\b(\d{4})\b')
re_solr_field = re.compile(r'^[-\w]+$', re.U)
re_not_az = re.compile('[^a-zA-Z]')
class EditionSolrUpdater(AbstractSolrUpdater):
key_prefix = '/books/'
thing_type = '/type/edition'
async def update_key(self, thing: dict) -> tuple[SolrUpdateRequest, list[str]]:
update = SolrUpdateRequest()
new_keys: list[str] = []
if thing['type']['key'] == self.thing_type:
if thing.get("works"):
new_keys.append(thing["works"][0]['key'])
# Make sure we remove any fake works created from orphaned editions
update.deletes.append(thing['key'].replace('/books/', '/works/'))
else:
# index the edition as it does not belong to any work
new_keys.append(thing['key'].replace('/books/', '/works/'))
else:
logger.info(
"%r is a document of type %r. Checking if any work has it as edition in solr...",
thing['key'],
thing['type']['key'],
)
work_key = solr_select_work(thing['key'])
if work_key:
logger.info("found %r, updating it...", work_key)
new_keys.append(work_key)
return update, new_keys
def solr_select_work(edition_key):
"""
Get corresponding work key for given edition key in Solr.
:param str edition_key: (ex: /books/OL1M)
:return: work_key
:rtype: str or None
"""
# solr only uses the last part as edition_key
edition_key = edition_key.split("/")[-1]
if not re_edition_key_basename.match(edition_key):
return None
edition_key = solr_escape(edition_key)
reply = requests.get(
f'{get_solr_base_url()}/select',
params={
'wt': 'json',
'q': f'edition_key:{edition_key}',
'rows': 1,
'fl': 'key',
},
).json()
if docs := reply['response'].get('docs', []):
return docs[0]['key'] # /works/ prefix is in solr
def solr_escape(query):
"""
Escape special characters in Solr query.
:param str query:
:rtype: str
"""
return re.sub(r'([\s\-+!()|&{}\[\]^"~*?:\\])', r'\\\1', query)
def is_sine_nomine(pub: str) -> bool:
"""Check if the publisher is 'sn' (excluding non-letter characters)."""
return re_not_az.sub('', pub).lower() == 'sn'
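# Editorial sketch (not in the original module): non-letter characters are ignored,
# so the usual "sine nomine" placeholders are all detected:
#   is_sine_nomine('[s.n.]')  -> True
#   is_sine_nomine('S. N.')   -> True
#   is_sine_nomine('Penguin') -> False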
class EditionSolrBuilder(AbstractSolrBuilder):
def __init__(
self,
edition: dict,
solr_work: 'WorkSolrBuilder | None' = None,
ia_metadata: bp.IALiteMetadata | None = None,
):
self._edition = edition
self._solr_work = solr_work
self._ia_metadata = ia_metadata
self._provider = bp.get_book_provider(edition)
@property
def key(self):
return self._edition['key']
@property
def title(self) -> str | None:
return self._edition.get('title')
@property
def subtitle(self) -> str | None:
return self._edition.get('subtitle')
@property
def alternative_title(self) -> set[str]:
"""Get titles from the editions as alternative titles."""
result: set[str] = set()
full_title = self._edition.get('title')
if not full_title:
return result
if self._edition.get('subtitle'):
full_title += ': ' + cast(str, self._edition['subtitle'])
result.add(full_title)
result.update(self._edition.get('work_titles', []))
result.update(self._edition.get('other_titles', []))
return result
@property
def cover_i(self) -> int | None:
return next(
(
cover_id
for cover_id in self._edition.get('covers', [])
if cover_id != -1
),
None,
)
@property
def language(self) -> list[str]:
"""Gets the 3 letter language codes (eg ['ger', 'fre'])"""
result: list[str] = []
for lang in self._edition.get('languages', []):
m = re_lang_key.match(lang['key'] if isinstance(lang, dict) else lang)
if m:
result.append(m.group(1))
return uniq(result)
@property
def publisher(self) -> list[str]:
return uniq(
publisher if not is_sine_nomine(publisher) else 'Sine nomine'
for publisher in self._edition.get('publishers', [])
)
@property
def number_of_pages(self) -> int | None:
try:
return int(self._edition.get('number_of_pages', None)) or None
except (TypeError, ValueError): # int(None) -> TypeErr, int("vii") -> ValueErr
return None
@property
def translation_of(self) -> str | None:
return self._edition.get("translation_of")
@property
def format(self) -> str | None:
return self._edition.get('physical_format')
@property
def isbn(self) -> list[str]:
"""
Get all ISBNs of the given edition. Calculates complementary ISBN13 for each
ISBN10 and vice-versa. Does not remove '-'s.
"""
isbns = []
isbns += [
isbn.replace("_", "").strip() for isbn in self._edition.get("isbn_13", [])
]
isbns += [
isbn.replace("_", "").strip() for isbn in self._edition.get("isbn_10", [])
]
# Get the isbn13 when isbn10 is present and vice-versa.
isbns += [opposite_isbn(v) for v in isbns]
return uniq(isbn for isbn in isbns if isbn)
@property
def lccn(self) -> list[str]:
return uniq(lccn.strip() for lccn in self._edition.get('lccn', []))
@property
def publish_date(self) -> str | None:
return self._edition.get('publish_date')
@property
def publish_year(self) -> int | None:
if self.publish_date:
m = re_year.search(self.publish_date)
return int(m.group(1)) if m else None
else:
return None
@property
def ia(self) -> str | None:
ocaid = self._edition.get('ocaid')
return ocaid.strip() if ocaid else None
@property
def ia_collection(self) -> list[str]:
collections = self._ia_metadata['collection'] if self._ia_metadata else set()
# Exclude fav-* collections because they're not useful to us.
return [c for c in collections if not c.startswith('fav-')]
@property
def ia_box_id(self) -> list[str]:
boxids = []
if 'ia_box_id' in self._edition:
if isinstance(self._edition['ia_box_id'], str):
boxids = [self._edition['ia_box_id']]
elif isinstance(self._edition['ia_box_id'], list):
boxids = self._edition['ia_box_id']
else:
logger.warning(
f'Bad ia_box_id on {self.key}: "{self._edition["ia_box_id"]}"'
)
if self._ia_metadata:
boxids += list(self._ia_metadata.get('boxid') or [])
return uniq(boxids, key=lambda x: x.lower())
@property
def identifiers(self) -> dict:
identifiers = {}
for key, id_list in self._edition.get('identifiers', {}).items():
solr_key = (
key.replace('.', '_')
.replace(',', '_')
.replace('(', '')
.replace(')', '')
.replace(':', '_')
.replace('/', '')
.replace('#', '')
.lower()
)
m = re_solr_field.match(solr_key)
if not m:
logger.warning(f'Bad identifier on {self.key}: "{key}"')
continue
identifiers[f'id_{solr_key}'] = uniq(v.strip() for v in id_list)
return identifiers
@cached_property
def ebook_access(self) -> bp.EbookAccess:
if not self._provider:
return bp.EbookAccess.NO_EBOOK
elif isinstance(self._provider, bp.InternetArchiveProvider):
return self._provider.get_access(self._edition, self._ia_metadata)
else:
return self._provider.get_access(self._edition)
@property
def has_fulltext(self) -> bool:
return self.ebook_access > bp.EbookAccess.UNCLASSIFIED
@property
def public_scan_b(self) -> bool:
return self.ebook_access == bp.EbookAccess.PUBLIC
def build(self) -> SolrDocument:
"""
Build the solr document for the given edition to store as a nested
document
Completely override parent class method to handle some peculiar
fields
"""
solr_doc: SolrDocument = cast(
SolrDocument,
{
'key': self.key,
'type': 'edition',
# Display data
'title': self.title,
'subtitle': self.subtitle,
'alternative_title': list(self.alternative_title),
'cover_i': self.cover_i,
'language': self.language,
# Duplicate the author data from the work
**(
{
'author_name': self._solr_work.author_name,
'author_key': self._solr_work.author_key,
'author_alternative_name': list(
self._solr_work.author_alternative_name
),
'author_facet': self._solr_work.author_facet,
}
if self._solr_work
else {}
),
# Misc useful data
'publisher': self.publisher,
'format': [self.format] if self.format else None,
'publish_date': [self.publish_date] if self.publish_date else None,
'publish_year': [self.publish_year] if self.publish_year else None,
# Identifiers
'isbn': self.isbn,
'lccn': self.lccn,
**self.identifiers,
# IA
'ia': [self.ia] if self.ia else None,
'ia_collection': self.ia_collection,
'ia_box_id': self.ia_box_id,
# Ebook access
'ebook_access': self.ebook_access.to_solr_str(),
'has_fulltext': self.has_fulltext,
'public_scan_b': self.public_scan_b,
},
)
return cast(
SolrDocument,
{
key: solr_doc[key] # type: ignore
for key in solr_doc
if solr_doc[key] not in (None, [], '') # type: ignore
},
)
| 11,402 | Python | .py | 292 | 28.705479 | 97 | 0.549358 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 208 | work.py | internetarchive_openlibrary/openlibrary/solr/updater/work.py |
from collections import defaultdict
from collections.abc import Iterable
import datetime
from functools import cached_property
import itertools
import logging
from math import ceil
import re
from statistics import median
import time
from typing import Optional, TypedDict, cast
from openlibrary.core import helpers as h
import openlibrary.book_providers as bp
from openlibrary.core.ratings import WorkRatingsSummary
from openlibrary.plugins.upstream.utils import safeget
from openlibrary.plugins.worksearch.subjects import SubjectPseudoKey
from openlibrary.solr.data_provider import DataProvider, WorkReadingLogSolrSummary
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.updater.abstract import AbstractSolrBuilder, AbstractSolrUpdater
from openlibrary.solr.updater.edition import EditionSolrBuilder
from openlibrary.solr.utils import SolrUpdateRequest, str_to_key
from openlibrary.utils import uniq
from openlibrary.utils.ddc import choose_sorting_ddc, normalize_ddc
from openlibrary.utils.lcc import choose_sorting_lcc, short_lcc_to_sortable_lcc
from openlibrary.utils.open_syllabus_project import get_total_by_olid
logger = logging.getLogger("openlibrary.solr")
re_author_key = re.compile(r'^/(?:a|authors)/(OL\d+A)')
re_edition_key = re.compile(r"/books/([^/]+)")
re_subject = re.compile("[, _]+")
class WorkSolrUpdater(AbstractSolrUpdater):
key_prefix = '/works/'
thing_type = '/type/work'
async def preload_keys(self, keys: Iterable[str]):
await super().preload_keys(keys)
self.data_provider.preload_editions_of_works(keys)
async def update_key(self, work: dict) -> tuple[SolrUpdateRequest, list[str]]:
"""
Get the Solr requests necessary to insert/update this work into Solr.
:param dict work: Work to insert/update
"""
wkey = work['key']
update = SolrUpdateRequest()
# q = {'type': '/type/redirect', 'location': wkey}
# redirect_keys = [r['key'][7:] for r in query_iter(q)]
# redirect_keys = [k[7:] for k in data_provider.find_redirects(wkey)]
# deletes += redirect_keys
# deletes += [wkey[7:]] # strip /works/ from /works/OL1234W
# Handle edition records as well
# When an edition does not contain a works list, create a fake work and index it.
if work['type']['key'] == '/type/edition':
fake_work = {
# Solr uses type-prefixed keys. It's required to be unique across
# all types of documents. The website takes care of redirecting
# /works/OL1M to /books/OL1M.
'key': wkey.replace("/books/", "/works/"),
'type': {'key': '/type/work'},
'title': work.get('title'),
'editions': [work],
'authors': [
{'type': '/type/author_role', 'author': {'key': a['key']}}
for a in work.get('authors', [])
],
}
# Hack to add subjects when indexing /books/ia:xxx
if work.get("subjects"):
fake_work['subjects'] = work['subjects']
return await self.update_key(fake_work)
elif work['type']['key'] == '/type/work':
try:
# Anand - Oct 2013
                # For /works/ia:xxx, editions are already supplied. Querying would return an empty response.
# Fetch editions
if "editions" in work:
editions = work['editions']
else:
editions = self.data_provider.get_editions_of_work(work)
# Fetch authors
author_keys = [
author['author']['key']
for author in normalize_authors(work.get('authors', []))
]
authors = [
await self.data_provider.get_document(key) for key in author_keys
]
if any(a['type']['key'] != '/type/author' for a in authors):
                    # We don't want to raise an exception here; just log a warning.
logger.warning('Unexpected author type error: %s', work['key'])
authors = [a for a in authors if a['type']['key'] == '/type/author']
# Fetch ia_metadata
iaids = [e["ocaid"] for e in editions if "ocaid" in e]
ia_metadata = {
iaid: get_ia_collection_and_box_id(iaid, self.data_provider)
for iaid in iaids
}
solr_doc = WorkSolrBuilder(
work, editions, authors, self.data_provider, ia_metadata
).build()
except: # noqa: E722
logger.error("failed to update work %s", work['key'], exc_info=True)
else:
if solr_doc is not None:
iaids = solr_doc.get('ia') or []
# Delete all ia:foobar keys
if iaids:
update.deletes += [f"/works/ia:{iaid}" for iaid in iaids]
update.adds.append(solr_doc)
else:
logger.error("unrecognized type while updating work %s", wkey)
return update, []
def get_ia_collection_and_box_id(
ia: str, data_provider: DataProvider
) -> Optional['bp.IALiteMetadata']:
"""
Get the collections and boxids of the provided IA id
TODO Make the return type of this a namedtuple so that it's easier to reference
:param str ia: Internet Archive ID
    :return: A dict of the form `{ boxid: set[str], collection: set[str] }` plus the
        item's raw `access_restricted_item` value, or None if no metadata is found
    :rtype: dict or None
"""
if len(ia) == 1:
return None
def get_list(d, key):
"""
Return d[key] as some form of list, regardless of if it is or isn't.
:param dict or None d:
:param str key:
:rtype: list
"""
if not d:
return []
value = d.get(key, [])
if not value:
return []
elif value and not isinstance(value, list):
return [value]
else:
return value
metadata = data_provider.get_metadata(ia)
if metadata is None:
        # Metadata is None when the IA id is not found or invalid.
        # TODO: It would be better if get_metadata raised an error.
return None
return {
'boxid': set(get_list(metadata, 'boxid')),
'collection': set(get_list(metadata, 'collection')),
'access_restricted_item': metadata.get('access-restricted-item'),
}
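# Example return shape (hypothetical identifier and values), assuming metadata exists:
#   get_ia_collection_and_box_id('goody', data_provider) might yield something like
#   {'boxid': {'IA1234'}, 'collection': {'inlibrary', 'printdisabled'},
#    'access_restricted_item': 'true'}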
class KeyDict(TypedDict):
key: str
class NormalizedAuthor(TypedDict):
type: KeyDict
author: KeyDict
def normalize_authors(authors: list[dict]) -> list[NormalizedAuthor]:
"""
Need to normalize to a predictable format because of inconsistencies in data
>>> normalize_authors([
... {'type': {'key': '/type/author_role'}, 'author': '/authors/OL1A'}
... ])
[{'type': {'key': '/type/author_role'}, 'author': {'key': '/authors/OL1A'}}]
>>> normalize_authors([{
... "type": {"key": "/type/author_role"},
... "author": {"key": "/authors/OL1A"}
... }])
[{'type': {'key': '/type/author_role'}, 'author': {'key': '/authors/OL1A'}}]
"""
return [
cast(
NormalizedAuthor,
{
'type': {'key': safeget(lambda: a['type']['key'], '/type/author_role')},
'author': (
a['author']
if isinstance(a['author'], dict)
else {'key': a['author']}
),
},
)
for a in authors
# TODO: Remove after
# https://github.com/internetarchive/openlibrary-client/issues/126
if 'author' in a
]
def extract_edition_olid(key: str) -> str:
m = re_edition_key.match(key)
if not m:
raise ValueError(f'Invalid key: {key}')
return m.group(1)
def datetimestr_to_int(datestr):
"""
Convert an OL datetime to a timestamp integer.
:param str or dict datestr: Either a string like `"2017-09-02T21:26:46.300245"` or a dict like
`{"value": "2017-09-02T21:26:46.300245"}`
:rtype: int
"""
if isinstance(datestr, dict):
datestr = datestr['value']
if datestr:
try:
t = h.parse_datetime(datestr)
except (TypeError, ValueError):
t = datetime.datetime.now()
else:
t = datetime.datetime.now()
return int(time.mktime(t.timetuple()))
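# For example, both of these forms yield the same (timezone-dependent) integer:
#   datetimestr_to_int('2017-09-02T21:26:46.300245')
#   datetimestr_to_int({'value': '2017-09-02T21:26:46.300245'})
# A falsy or unparseable value falls back to the current time.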
def subject_name_to_key(subject_type: str, name: str) -> SubjectPseudoKey:
prefix = '/subjects/'
if subject_type != 'subject':
prefix += f'{subject_type}:'
return prefix + re_subject.sub("_", name.lower()).strip("_")
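# For example, given the regex above:
#   subject_name_to_key('subject', 'Science Fiction')  -> '/subjects/science_fiction'
#   subject_name_to_key('place', 'San Francisco')      -> '/subjects/place:san_francisco'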
class WorkSolrBuilder(AbstractSolrBuilder):
def __init__(
self,
work: dict,
editions: list[dict],
authors: list[dict],
data_provider: DataProvider,
ia_metadata: dict[str, Optional['bp.IALiteMetadata']],
):
self._work = work
self._editions = editions
self._authors = authors
self._ia_metadata = ia_metadata
self._data_provider = data_provider
self._solr_editions = [
EditionSolrBuilder(
e, self, self._ia_metadata.get(e.get('ocaid', '').strip())
)
for e in self._editions
]
def build(self) -> SolrDocument:
doc = cast(dict, super().build())
doc |= self.build_identifiers()
doc |= self.build_subjects()
doc |= self.build_legacy_ia_fields()
doc |= self.build_ratings() or {}
doc |= self.build_reading_log() or {}
return cast(SolrDocument, doc)
@property
def key(self):
return self._work['key']
@property
def type(self):
return 'work'
@property
def seed(self) -> list[str]:
w = self._work
return uniq(
itertools.chain(
(e.key for e in self._solr_editions),
(self.key,),
(author['key'] for author in self._authors),
(subject_name_to_key("subject", s) for s in w.get("subjects", [])),
(subject_name_to_key("person", s) for s in w.get("subject_people", [])),
(subject_name_to_key("place", s) for s in w.get("subject_places", [])),
(subject_name_to_key("time", s) for s in w.get("subject_times", [])),
)
)
@property
def title(self) -> str | None:
if self._work.get('title'):
return self._work['title']
else:
# Some works are missing a title, but have titles on their editions
logger.warning('Work missing title %s' % self.key)
return next(
(ed.title for ed in self._solr_editions if ed.title), '__None__'
)
@property
def subtitle(self) -> str | None:
return self._work.get('subtitle')
@property
def alternative_title(self) -> set[str]:
alt_title_set = set()
for book in (EditionSolrBuilder(self._work), *self._solr_editions):
alt_title_set.update(book.alternative_title)
if book.translation_of:
alt_title_set.add(book.translation_of)
return alt_title_set
@property
def alternative_subtitle(self) -> set[str]:
"""Get subtitles from the editions as alternative titles."""
return {
bookish['subtitle'] for bookish in self._editions if bookish.get('subtitle')
}
@property
def edition_count(self) -> int:
return len(self._editions)
@property
def osp_count(self) -> int | None:
return get_total_by_olid(self.key)
@property
def edition_key(self) -> list[str]:
return [extract_edition_olid(e['key']) for e in self._editions]
@property
def by_statement(self) -> set[str]:
return {e["by_statement"] for e in self._editions if "by_statement" in e}
@property
def publish_date(self) -> set[str]:
return {e.publish_date for e in self._solr_editions if e.publish_date}
@property
def publish_year(self) -> set[int]:
return {
year for e in self._solr_editions if (year := e.publish_year) is not None
}
@property
def first_publish_year(self) -> int | None:
if publish_years := self.publish_year:
return min(publish_years)
else:
return None
@property
def number_of_pages_median(self) -> int | None:
number_of_pages = [
pages
for e in self._solr_editions
if (pages := e.number_of_pages) is not None
]
if number_of_pages:
return ceil(median(number_of_pages))
else:
return None
@property
def editions(self) -> list[SolrDocument]:
return [ed.build() for ed in self._solr_editions]
@property
def lccn(self) -> set[str]:
return {lccn for ed in self._solr_editions for lccn in ed.lccn}
@property
def publish_place(self) -> set[str]:
return {v for e in self._editions for v in e.get('publish_places', [])}
@property
def oclc(self) -> set[str]:
return {v for e in self._editions for v in e.get('oclc_numbers', [])}
@property
def contributor(self) -> set[str]:
return {
v
for e in self._editions
for v in (
e.get('contributions', [])
# TODO: contributors wasn't included here in the past, but
# we likely want it to be edition-only if possible?
# Excluding for now to avoid a possible perf hit in the
# next full reindex which is already pretty loaded
# + [c.get('name') for c in e.get('contributors', [])]
)
if v
}
@property
def lcc(self) -> set[str]:
raw_lccs = {
lcc for ed in self._editions for lcc in ed.get('lc_classifications', [])
}
return {lcc for lcc in map(short_lcc_to_sortable_lcc, raw_lccs) if lcc}
@property
def lcc_sort(self) -> str | None:
if lccs := self.lcc:
return choose_sorting_lcc(lccs)
else:
return None
@property
def ddc(self) -> set[str]:
raw_ddcs = {ddc for ed in self._editions for ddc in get_edition_ddcs(ed)}
return {ddc for raw_ddc in raw_ddcs for ddc in normalize_ddc(raw_ddc) if ddc}
@property
def ddc_sort(self) -> str | None:
if ddcs := self.ddc:
return choose_sorting_ddc(ddcs)
else:
return None
@property
def isbn(self) -> set[str]:
return {isbn for ed in self._editions for isbn in EditionSolrBuilder(ed).isbn}
@property
def last_modified_i(self) -> int:
return max(
datetimestr_to_int(doc.get('last_modified'))
for doc in (self._work, *self._editions)
)
@property
def ebook_count_i(self) -> int:
return sum(
1 for e in self._solr_editions if e.ebook_access > bp.EbookAccess.NO_EBOOK
)
@cached_property
def ebook_access(self) -> bp.EbookAccess:
return max(
(e.ebook_access for e in self._solr_editions),
default=bp.EbookAccess.NO_EBOOK,
)
@property
def has_fulltext(self) -> bool:
return any(e.has_fulltext for e in self._solr_editions)
@property
def public_scan_b(self) -> bool:
return any(e.public_scan_b for e in self._solr_editions)
@cached_property
def ia(self) -> list[str]:
return [cast(str, e.ia) for e in self._ia_editions]
@property
def ia_collection(self) -> list[str]:
return sorted(uniq(c for e in self._solr_editions for c in e.ia_collection))
@property
def ia_collection_s(self) -> str:
return ';'.join(self.ia_collection)
@cached_property
def _ia_editions(self) -> list[EditionSolrBuilder]:
def get_ia_sorting_key(ed: EditionSolrBuilder) -> tuple[int, str]:
return (
# -1 to sort in reverse and make public first
-1 * ed.ebook_access.value,
# De-prioritize google scans because they are lower quality
'0: non-goog' if not cast(str, ed.ia).endswith('goog') else '1: goog',
)
return sorted((e for e in self._solr_editions if e.ia), key=get_ia_sorting_key)
# --- These should be deprecated and removed ---
@property
def lending_edition_s(self) -> str | None:
if (
not self._ia_editions
or self._ia_editions[0].ebook_access <= bp.EbookAccess.PRINTDISABLED
):
return None
else:
return extract_edition_olid(self._ia_editions[0].key)
@property
def lending_identifier_s(self) -> str | None:
if (
not self._ia_editions
or self._ia_editions[0].ebook_access <= bp.EbookAccess.PRINTDISABLED
):
return None
else:
return self._ia_editions[0].ia
@property
def printdisabled_s(self) -> str | None:
printdisabled_eds = [
ed for ed in self._ia_editions if 'printdisabled' in ed.ia_collection
]
if not printdisabled_eds:
return None
else:
return ';'.join(
cast(str, extract_edition_olid(ed.key)) for ed in printdisabled_eds
)
# ^^^ These should be deprecated and removed ^^^
def build_ratings(self) -> WorkRatingsSummary | None:
return self._data_provider.get_work_ratings(self._work['key'])
def build_reading_log(self) -> WorkReadingLogSolrSummary | None:
return self._data_provider.get_work_reading_log(self._work['key'])
@cached_property
def cover_i(self) -> int | None:
work_cover_id = next(
itertools.chain(
(
cover_id
for cover_id in self._work.get('covers', [])
if cover_id != -1
),
[None],
)
)
return work_cover_id or next(
(ed.cover_i for ed in self._solr_editions if ed.cover_i is not None), None
)
@property
def cover_edition_key(self) -> str | None:
if self.cover_i is None:
return None
return next(
(
extract_edition_olid(ed['key'])
for ed in self._editions
if self.cover_i in ed.get('covers', [])
),
None,
)
@property
def first_sentence(self) -> set[str]:
return {
s['value'] if isinstance(s, dict) else s
for ed in self._editions
if (s := ed.get('first_sentence', None))
}
@property
def publisher(self) -> set[str]:
return {publisher for ed in self._solr_editions for publisher in ed.publisher}
@property
def format(self) -> set[str]:
return {ed.format for ed in self._solr_editions if ed.format}
@property
def language(self) -> set[str]:
return {lang for ed in self._solr_editions for lang in ed.language}
def build_legacy_ia_fields(self) -> dict:
ia_loaded_id = set()
ia_box_id = set()
for e in self._editions:
# When do we write these to the actual edition?? This code might
# be dead.
if e.get('ia_loaded_id'):
if isinstance(e['ia_loaded_id'], str):
ia_loaded_id.add(e['ia_loaded_id'])
else:
try:
assert isinstance(e['ia_loaded_id'], list)
assert isinstance(e['ia_loaded_id'][0], str)
except AssertionError:
logger.error(
"AssertionError: ia=%s, ia_loaded_id=%s",
e.get("ia"),
e['ia_loaded_id'],
)
raise
ia_loaded_id.update(e['ia_loaded_id'])
if e.get('ia_box_id'):
if isinstance(e['ia_box_id'], str):
ia_box_id.add(e['ia_box_id'])
else:
try:
assert isinstance(e['ia_box_id'], list)
assert isinstance(e['ia_box_id'][0], str)
except AssertionError:
logger.error("AssertionError: %s", e['key'])
raise
ia_box_id.update(e['ia_box_id'])
doc = {}
if ia_loaded_id:
doc['ia_loaded_id'] = list(ia_loaded_id)
if ia_box_id:
doc['ia_box_id'] = list(ia_box_id)
return doc
@cached_property
def author_key(self) -> list[str]:
return [
m.group(1)
for m in (re_author_key.match(a['key']) for a in self._authors)
if m
]
@cached_property
def author_name(self) -> list[str]:
return [a.get('name', '') for a in self._authors]
@cached_property
def author_alternative_name(self) -> set[str]:
return {
alt_name for a in self._authors for alt_name in a.get('alternate_names', [])
}
@cached_property
def author_facet(self) -> list[str]:
return [f'{key} {name}' for key, name in zip(self.author_key, self.author_name)]
def build_identifiers(self) -> dict[str, list[str]]:
identifiers: dict[str, list[str]] = defaultdict(list)
for ed in self._solr_editions:
for k, v in ed.identifiers.items():
identifiers[k] += v
return dict(identifiers)
def build_subjects(self) -> dict:
doc: dict = {}
field_map = {
'subjects': 'subject',
'subject_places': 'place',
'subject_times': 'time',
'subject_people': 'person',
}
for work_field, subject_type in field_map.items():
if not self._work.get(work_field):
continue
doc |= {
subject_type: self._work[work_field],
f'{subject_type}_facet': self._work[work_field],
f'{subject_type}_key': [str_to_key(s) for s in self._work[work_field]],
}
return doc
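        # For example, a work with subject_places=['San Francisco'] contributes roughly:
        #   {'place': ['San Francisco'], 'place_facet': ['San Francisco'],
        #    'place_key': [str_to_key('San Francisco')]}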
def get_edition_ddcs(ed: dict):
ddcs: list[str] = ed.get('dewey_decimal_class', [])
if len(ddcs) > 1:
# In DDC, `92` or `920` is sometimes appended to a DDC to denote
# "Biography". We have a clause to handle this if it's part of the same
# DDC (See utils/ddc.py), but some books have it as an entirely separate
# DDC; e.g.:
# * [ "979.4/830046872073", "92" ]
# https://openlibrary.org/books/OL3029363M.json
# * [ "813/.54", "B", "92" ]
# https://openlibrary.org/books/OL2401343M.json
# * [ "092", "823.914" ]
# https://openlibrary.org/books/OL24767417M
ddcs = [ddc for ddc in ddcs if ddc not in ('92', '920', '092')]
return ddcs
| 23,289 | Python | .py | 587 | 29.529813 | 98 | 0.562085 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 209 | abstract.py | internetarchive_openlibrary/openlibrary/solr/updater/abstract.py |
from collections.abc import Iterable
from typing import cast
import openlibrary.book_providers as bp
from openlibrary.solr.data_provider import DataProvider
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.utils import SolrUpdateRequest
class AbstractSolrUpdater:
key_prefix: str
thing_type: str
data_provider: DataProvider
def __init__(self, data_provider: DataProvider):
self.data_provider = data_provider
def key_test(self, key: str) -> bool:
return key.startswith(self.key_prefix)
async def preload_keys(self, keys: Iterable[str]):
await self.data_provider.preload_documents(keys)
async def update_key(self, thing: dict) -> tuple[SolrUpdateRequest, list[str]]:
"""
:return: (update, new keys to update)
"""
raise NotImplementedError()
class AbstractSolrBuilder:
def build(self) -> SolrDocument:
# Iterate over all non-_ properties of this instance and add them to the
# document.
# Allow @property and @cached_property though!
doc: dict = {}
for field in dir(self):
if field.startswith('_'):
continue
val = getattr(self, field)
if callable(val):
continue
elif val is None or (isinstance(val, Iterable) and not val):
# Skip if empty list/string
continue
elif isinstance(val, set):
doc[field] = list(val)
elif isinstance(val, bp.EbookAccess):
doc[field] = val.to_solr_str()
elif isinstance(val, (str, int, float, bool, list)):
doc[field] = val
else:
raise ValueError(f'Unknown type for {field}: {type(val)}')
return cast(SolrDocument, doc)
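# A minimal hypothetical subclass, to illustrate what build() picks up:
#   class ExampleBuilder(AbstractSolrBuilder):
#       @property
#       def key(self) -> str:
#           return '/works/OL1W'
#       @property
#       def subject(self) -> set[str]:
#           return {'Fantasy'}
#   ExampleBuilder().build()  # -> {'key': '/works/OL1W', 'subject': ['Fantasy']}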
| 1,838 | Python | .py | 45 | 31.622222 | 83 | 0.629277 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 210 | list.py | internetarchive_openlibrary/openlibrary/solr/updater/list.py |
from collections import defaultdict
import re
from typing import cast
import httpx
from openlibrary.plugins.openlibrary.lists import (
SeedType,
seed_key_to_seed_type,
)
from openlibrary.plugins.worksearch.subjects import SubjectType
from openlibrary.solr.solr_types import SolrDocument
from openlibrary.solr.updater.abstract import AbstractSolrBuilder, AbstractSolrUpdater
from openlibrary.solr.utils import SolrUpdateRequest, get_solr_base_url, str_to_key
class ListSolrUpdater(AbstractSolrUpdater):
key_prefix = '/lists/'
thing_type = '/type/list'
def key_test(self, key: str) -> bool:
return bool(re.match(r'^(/people/[^/]+)?/lists/[^/]+$', key))
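        # Accepts both site-wide and user lists, e.g. the (hypothetical) keys
        # '/lists/OL1L' and '/people/example/lists/OL1L'.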
async def update_key(self, list: dict) -> tuple[SolrUpdateRequest, list[str]]:
seeds = ListSolrBuilder(list).seed
lst = ListSolrBuilder(list, await fetch_seeds_facets(seeds))
doc = lst.build()
return SolrUpdateRequest(adds=[doc]), []
async def fetch_seeds_facets(seeds: list[str]):
base_url = get_solr_base_url() + '/select'
facet_fields: list[SubjectType] = ['subject', 'time', 'person', 'place']
seeds_by_type: defaultdict[SeedType, list] = defaultdict(list)
for seed in seeds:
seeds_by_type[seed_key_to_seed_type(seed)].append(seed)
query: list[str] = []
for seed_type, seed_values in seeds_by_type.items():
match seed_type:
case 'edition' | 'author':
edition_olids = " OR ".join(key.split('/')[-1] for key in seed_values)
query.append(f'edition_key:( {edition_olids} )')
case 'work':
seed_keys = " OR ".join(f'"{key}"' for key in seed_values)
query.append(f'key:( {seed_keys} )')
case 'subject':
pass
case _:
raise NotImplementedError(f'Unknown seed type {seed_type}')
async with httpx.AsyncClient() as client:
response = await client.post(
base_url,
timeout=30,
data={
'wt': 'json',
'json.nl': 'arrarr',
'q': ' OR '.join(query),
'fq': 'type:work',
'rows': 0,
'facet': 'true',
'facet.mincount': 1,
'facet.limit': 50,
'facet.field': [f"{field}_facet" for field in facet_fields],
},
)
return response.json()
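# For example, seeds like ['/books/OL1M', '/works/OL2W'] would (roughly) produce
# a Solr query of the form:
#   q = 'edition_key:( OL1M ) OR key:( "/works/OL2W" )'
# faceted on subject_facet, time_facet, person_facet and place_facet.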
class ListSolrBuilder(AbstractSolrBuilder):
def __init__(self, list: dict, solr_reply: dict | None = None):
self._list = list
self._solr_reply = solr_reply
def build(self) -> SolrDocument:
doc = cast(dict, super().build())
doc |= self.build_subjects()
return cast(SolrDocument, doc)
def build_subjects(self) -> dict:
if not self._solr_reply:
return {}
doc: dict = {}
for facet, counts in self._solr_reply['facet_counts']['facet_fields'].items():
subject_type = cast(SubjectType, facet.split('_')[0])
subjects = [s for s, count in counts]
doc |= {
subject_type: subjects,
f'{subject_type}_facet': subjects,
f'{subject_type}_key': [str_to_key(s) for s in subjects],
}
return doc
@property
def key(self) -> str:
return self._list['key']
@property
def type(self) -> str:
return 'list'
@property
def name(self) -> str | None:
return self._list.get('name')
@property
def seed(self) -> list[str]:
return [
(
(seed.get('key') or seed['thing']['key'])
if isinstance(seed, dict)
else seed
)
for seed in self._list.get('seeds', [])
]
| 3,819 | Python | .py | 98 | 29.44898 | 86 | 0.571043 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |
| 211 | get_ia.py | internetarchive_openlibrary/openlibrary/catalog/get_ia.py |
import requests
from infogami import config
from lxml import etree
from time import sleep
from openlibrary.catalog.marc.marc_binary import MarcBinary
from openlibrary.catalog.marc.marc_xml import MarcXml
from openlibrary.core import ia
import lxml.etree
IA_BASE_URL = config.get('ia_base_url')
IA_DOWNLOAD_URL = f'{IA_BASE_URL}/download/'
MAX_MARC_LENGTH = 100000
def urlopen_keep_trying(url: str, headers=None, **kwargs):
"""Tries to request the url three times, raises HTTPError if 403, 404, or 416. Returns a requests.Response"""
for i in range(3):
try:
resp = requests.get(url, headers=headers, **kwargs)
resp.raise_for_status()
return resp
except requests.HTTPError as error:
if error.response and error.response.status_code in (403, 404, 416):
raise
sleep(2)
def get_marc_record_from_ia(
identifier: str, ia_metadata: dict | None = None
) -> MarcBinary | MarcXml | None:
"""
    Takes an IA identifier and optional IA metadata and returns a MARC record instance.
08/2018: currently called by openlibrary/plugins/importapi/code.py
when the /api/import/ia endpoint is POSTed to.
:param ia_metadata: The full ia metadata; e.g. https://archive.org/metadata/goody,
not https://archive.org/metadata/goody/metadata
"""
if ia_metadata is None:
ia_metadata = ia.get_metadata(identifier)
filenames = ia_metadata['_filenames'] # type: ignore[index]
marc_xml_filename = identifier + '_marc.xml'
marc_bin_filename = identifier + '_meta.mrc'
item_base = f'{IA_DOWNLOAD_URL}{identifier}/'
# Try marc.bin first
if marc_bin_filename in filenames:
data = urlopen_keep_trying(item_base + marc_bin_filename).content
return MarcBinary(data)
# If that fails, try marc.xml
if marc_xml_filename in filenames:
data = urlopen_keep_trying(item_base + marc_xml_filename).content
root = etree.fromstring(
data, parser=lxml.etree.XMLParser(resolve_entities=False)
)
return MarcXml(root)
return None
def get_from_archive_bulk(locator):
"""
    Gets a single binary MARC record from within an Archive.org
    bulk MARC item, and returns the offset and length of the next
    record.
    If the next offset or length is `None`, there is no next record.
:param str locator: Locator ocaid/filename:offset:length
:rtype: (str|None, int|None, int|None)
:return: (Binary MARC data, Next record offset, Next record length)
"""
if locator.startswith('marc:'):
locator = locator[5:]
filename, offset, length = locator.split(":")
offset = int(offset)
length = int(length)
r0, r1 = offset, offset + length - 1
# get the next record's length in this request
r1 += 5
url = IA_DOWNLOAD_URL + filename
assert 0 < length < MAX_MARC_LENGTH
response = urlopen_keep_trying(url, headers={'Range': 'bytes=%d-%d' % (r0, r1)})
data = None
if response:
# this truncates the data to MAX_MARC_LENGTH, but is probably not necessary here?
data = response.content[:MAX_MARC_LENGTH]
len_in_rec = int(data[:5])
if len_in_rec != length:
data, next_offset, next_length = get_from_archive_bulk(
'%s:%d:%d' % (filename, offset, len_in_rec)
)
else:
next_length = data[length:]
data = data[:length]
if len(next_length) == 5:
# We have data for the next record
next_offset = offset + len_in_rec
next_length = int(next_length)
else:
next_offset = next_length = None
return data, next_offset, next_length
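# Rough usage sketch, iterating through a bulk item with a hypothetical locator:
#   data, next_offset, next_length = get_from_archive_bulk('marc:some_item/file.mrc:0:971')
#   while next_offset is not None:
#       data, next_offset, next_length = get_from_archive_bulk(
#           'marc:some_item/file.mrc:%d:%d' % (next_offset, next_length)
#       )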
| 3,774 | Python | .py | 90 | 34.677778 | 114 | 0.651842 | internetarchive/openlibrary | 5,078 | 1,311 | 956 | AGPL-3.0 | 9/5/2024, 5:07:13 PM (Europe/Amsterdam) |