import asyncio
from urllib.parse import unquote, urlparse
from aiolimiter import AsyncLimiter
from pathlib import Path
import traceback
from typing import Literal
from fastapi.routing import APIRouter
import logging
import io
import zipfile
import os
from httpx import AsyncClient
from pydantic import BaseModel
import subprocess
import pandas as pd
import re
import tempfile
from lxml import etree
from bs4 import BeautifulSoup
from fastapi import Depends, File, HTTPException, UploadFile
from dependencies import get_http_client, get_llm_router
from fastapi.responses import StreamingResponse
from litellm.router import Router
from kreuzberg import ExtractionConfig, extract_bytes

from schemas import DocInfo, GetMeetingDocsRequest, GetMeetingDocsResponse, DocRequirements, DownloadDocsRequest, GetMeetingsRequest, GetMeetingsResponse, ExtractRequirementsRequest, ExtractRequirementsResponse

# API router for requirement extraction from docs / doc list retrieval / download
router = APIRouter(tags=["document extraction"])

# ==================================================== Utilities =================================================================
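# XML namespaces used when rewriting DOCX revision markup below:
# 'w' is the main WordprocessingML namespace (<w:ins>, <w:del>, ...), 'v' covers legacy VML shapes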
NSMAP = {
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'v': 'urn:schemas-microsoft-com:vml'
}

# ================================== Converting of files to .txt ====================================

KREUZBERG_CONFIG: ExtractionConfig = ExtractionConfig(
    force_ocr=False, ocr_backend=None, extract_tables=True)

# Unfortunately needs to be kept at 1, as LibreOffice isn't built to support parallel instances
LO_CONVERSION_MUTEX = asyncio.Semaphore(1)

# Supported file types for text extraction and their MIME type
FORMAT_MIME_TYPES = {
    ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    ".pdf": "application/pdf",
    ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
    ".doc": "application/msword",
    ".ppt": "application/vnd.ms-powerpoint"
}


async def convert_file_type(contents: io.BytesIO, filename: str, input_ext: str, output_ext: str, lo_filter: str | None = None) -> io.BytesIO:
    """
    Converts the given file bytes to the specified file type using headless LibreOffice.

    Args:
        contents: File contents
        filename: File base name WITHOUT THE EXTENSION
        input_ext: Input extension (WITHOUT THE DOT)
        output_ext: Output extension (WITHOUT THE DOT)
        lo_filter: Optional LibreOffice conversion filter to use.
    """

    # hold the mutex for the whole conversion so a failed run can never leave it locked
    async with LO_CONVERSION_MUTEX:
        with tempfile.TemporaryDirectory() as tmpdir:
            dir_path = Path(tmpdir)
            input_file_path = dir_path / f"{filename}.{input_ext}"
            output_file_path = dir_path / f"{filename}.{output_ext}"

            # write the in-memory contents to the input file
            with open(input_file_path, "wb") as in_file:
                in_file.write(contents.read())

            out_bytes = io.BytesIO()

            # construct the command
            command = [
                "libreoffice",
                "--headless",
                "--convert-to", f"{output_ext}:{lo_filter}" if lo_filter else output_ext,
                "--outdir", tmpdir,
                str(input_file_path)  # Ensure path is a string for subprocess
            ]

            # convert using libreoffice asynchronously
            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            stdout, stderr = await process.communicate()
            exit_code = process.returncode

            if exit_code != 0 and not output_file_path.exists():
                raise subprocess.CalledProcessError(
                    exit_code,
                    command,
                    output=stdout,
                    stderr=stderr
                )

            with open(output_file_path, mode="rb") as out:
                out_bytes.write(out.read())

            out_bytes.seek(0)
            return out_bytes

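# Illustrative usage (hypothetical file name and buffer; the filter shown is
# LibreOffice's writer_pdf_Export):
#   docx_bytes = await convert_file_type(buf, "S2-2400001", "doc", "docx")
#   pdf_bytes = await convert_file_type(buf, "S2-2400001", "docx", "pdf", "writer_pdf_Export")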

async def extract_text_contents(filename: str, ext: str, contents: io.BytesIO) -> list[str]:
    """
    Converts the given file, represented as (filename, ext, contents), to a list of text lines.
    File types that require conversion are first converted to the appropriate format before text extraction.
    """

    final_text: str | None = None
    if ext == ".doc":
        logging.debug(f"Converting {filename} .doc --> .docx")
        docx_bytes = await convert_file_type(contents, filename, "doc", "docx")
        extracted_data = await extract_bytes(docx_bytes.read(), FORMAT_MIME_TYPES[".docx"], config=KREUZBERG_CONFIG)
        final_text = extracted_data.content
    elif ext == ".docx":
        # Apply tracked revisions to docx files (especially for pCR / draftCR files)
        logging.debug(f"Updating .docx revisions for {filename}.")
        applied_revision = apply_docx_revisions(zipfile.ZipFile(contents))
        extracted_data = await extract_bytes(applied_revision.read(), FORMAT_MIME_TYPES[".docx"], config=KREUZBERG_CONFIG)
        final_text = extracted_data.content
    elif ext == ".ppt":
        logging.debug(f"Converting {filename} .ppt --> .pptx")
        pptx_bytes = await convert_file_type(contents, filename, "ppt", "pptx")
        extracted_data = await extract_bytes(pptx_bytes.read(), FORMAT_MIME_TYPES[".pptx"], config=KREUZBERG_CONFIG)
        final_text = extracted_data.content
    else:
        if ext in FORMAT_MIME_TYPES:  # check if file extension is supported
            extracted_data = await extract_bytes(contents.read(), FORMAT_MIME_TYPES[ext], config=KREUZBERG_CONFIG)
            final_text = extracted_data.content
        else:
            raise ValueError(
                f"Unsupported file type: {ext}, filename: {filename}")

    # include an empty line at the beginning
    txt_data = [""] + [line.strip()
                       for line in final_text.splitlines() if line.strip()]

    return txt_data
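# Illustrative usage (hypothetical document), mirroring the call sites below:
#   lines = await extract_text_contents("S2-2400123", ".docx", io.BytesIO(raw))
#   full_text = "\n".join(lines)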

# Rate limit of FTP downloads per minute
FTP_DOWNLOAD_RATE_LIMITER = AsyncLimiter(max_rate=60, time_period=60)
# Max number of parallel workers downloading
FTP_MAX_PARALLEL_WORKERS = asyncio.Semaphore(4)
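# Together these caps allow at most 4 in-flight downloads while limiting overall
# throughput to 60 requests per minute.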


async def get_doc_archive(url: str, client: AsyncClient) -> tuple[str, str, io.BytesIO]:
    """
    Fetches the zipped document at the given URL and returns it as a (name, extension, contents) tuple.
    The first document whose extension can be converted to text is selected.
    """

    async with FTP_DOWNLOAD_RATE_LIMITER:
        async with FTP_MAX_PARALLEL_WORKERS:
            if not url.endswith(".zip"):
                raise ValueError("URL must point to a ZIP file")

            resp = await client.get(url, headers={
                "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            })

            resp.raise_for_status()

            with zipfile.ZipFile(io.BytesIO(resp.content)) as zf:
                # there should be a single document per archive
                for entry in zf.infolist():
                    if entry.is_dir():
                        continue

                    file_name = entry.filename
                    root, ext = os.path.splitext(file_name)
                    ext = ext.lower()

                    # skip the file if it isn't supported
                    if ext not in FORMAT_MIME_TYPES:
                        logging.debug(
                            f"Skipping unsupported filetype found in archive: {ext}")
                        continue

                    doc_bytes = zf.read(file_name)

                    return (root, ext, io.BytesIO(doc_bytes))

            raise ValueError(
                f"No file with a supported extension was found in the archive: {url}")


def apply_docx_revisions(docx_zip: zipfile.ZipFile) -> io.BytesIO:
    """
    Applies the tracked revisions of a .docx before returning the contents.

    Args:
        docx_zip: The Word document as a zip archive
    """

    try:
        xml_bytes = docx_zip.read('word/document.xml')
        logging.debug("Read the document XML")
    except KeyError:
        raise FileNotFoundError(
            "word/document.xml not found in the DOCX archive.")

    parser = etree.XMLParser(remove_blank_text=True)
    root = etree.fromstring(xml_bytes, parser=parser)
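    # Illustrative (hypothetical) fragment of the rewrite performed below:
    #   <w:p><w:del>old run</w:del><w:ins><w:r><w:t>new</w:t></w:r></w:ins></w:p>
    # becomes
    #   <w:p><w:r><w:t>new</w:t></w:r></w:p>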

    # Remove <w:del> tags along with their contents
    for del_elem in root.xpath('//w:del', namespaces=NSMAP):
        parent = del_elem.getparent()
        if parent is not None:
            parent.remove(del_elem)

    # Unwrap <w:ins> tags, keeping their children in place
    for ins_elem in root.xpath('//w:ins', namespaces=NSMAP):
        parent = ins_elem.getparent()
        if parent is not None:
            index = parent.index(ins_elem)
            for child in ins_elem.iterchildren():
                parent.insert(index, child)
                index += 1
            parent.remove(ins_elem)

    # Strip comment range markers and references
    for tag in ['w:commentRangeStart', 'w:commentRangeEnd', 'w:commentReference']:
        for elem in root.xpath(f'//{tag}', namespaces=NSMAP):
            parent = elem.getparent()
            if parent is not None:
                parent.remove(elem)

    # Create a new docx with the modified XML
    output = io.BytesIO()

    with zipfile.ZipFile(output, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
        # Copy all unmodified files over
        for file_info in docx_zip.infolist():
            if file_info.filename != 'word/document.xml':
                new_zip.writestr(file_info, docx_zip.read(file_info.filename))

        # Add the modified document.xml
        xml_str = etree.tostring(
            root,
            xml_declaration=True,
            encoding='UTF-8',
            pretty_print=True
        )
        new_zip.writestr('word/document.xml', xml_str)

    output.seek(0)
    logging.debug("Exporting new docx revision OK")
    return output

# ============================================= Doc routes =========================================================


@router.post("/get_meetings", response_model=GetMeetingsResponse)
async def get_meetings(req: GetMeetingsRequest, http_client: AsyncClient = Depends(get_http_client)):
    """
    Retrieves the list of meetings for the given working group.
    """
    # Extracting WG
    working_group = req.working_group
    tsg = re.sub(r"\d+", "", working_group)
    wg_number = re.search(r"\d", working_group).group(0)
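    # e.g. (illustrative) working_group "SA2" yields tsg="SA" and wg_number="2"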

    # building corresponding FTP url
    logging.debug(f"FTP internal working group ID is {tsg}{wg_number}")
    url = "https://www.3gpp.org/ftp/tsg_" + tsg
    logging.debug(url)

    ftp_request = await http_client.get(url)
    soup = BeautifulSoup(ftp_request.text, "html.parser")

    meeting_folders = []
    all_meetings = []
    wg_folders = [item.get_text() for item in soup.select("tr td a")]
    selected_folder = None

    # sanity check to ensure the requested working group is present in the FTP directories
    for folder in wg_folders:
        if "wg" + str(wg_number) in folder.lower():
            selected_folder = folder
            break

    url += "/" + selected_folder
    logging.debug(url)

    if selected_folder:
        resp = await http_client.get(url)
        soup = BeautifulSoup(resp.text, "html.parser")
        meeting_folders = [item.get_text() for item in soup.select("tr td a") if item.get_text(
        ).startswith("TSG") or (item.get_text().startswith("CT") and "-" in item.get_text())]
        all_meetings = [working_group + "#" + meeting.split("_", 1)[1].replace("_", " ").replace(
            "-", " ") if meeting.startswith('TSG') else meeting.replace("-", "#") for meeting in meeting_folders]

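    # The response maps display names to FTP folder names, e.g. (illustrative):
    #   {"SA2#160 Paris": "TSGS2_160_Paris", "CT#103": "CT-103"}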
    return GetMeetingsResponse(meetings=dict(zip(all_meetings, meeting_folders)))

# ============================================================================================================================================


@router.post("/get_meeting_docs", response_model=GetMeetingDocsResponse)
async def get_meeting_docs(req: GetMeetingDocsRequest, http_client: AsyncClient = Depends(get_http_client)) -> GetMeetingDocsResponse:
    """
    Downloads the document list dataframe for a given meeting, or alternatively builds it from the given FTP URL.
    If the `custom_url` field is set in the request, the other fields are ignored.
    """

    if req.custom_url:
        logging.info(f"Fetching TDocs at custom URL {req.custom_url}")

        # Only allow 3GPP FTP URLs (check the hostname, not just a substring)
        host = urlparse(req.custom_url).hostname or ""
        if host != "3gpp.org" and not host.endswith(".3gpp.org"):
            raise HTTPException(
                status_code=403, detail="Only 3gpp.org FTP URLs are allowed")

        # The 3GPP FTP listing is an ASP.NET app which tags available file links with a .file CSS class
        response = await http_client.get(req.custom_url)
        soup = BeautifulSoup(response.text, "html.parser")

        # retrieve all file links from the HTML response
        file_links = [link.get('href')
                      for link in soup.select('table > tbody > tr a.file')]

        # extract all the file names (without extensions)
        file_names = [
            unquote(Path(urlparse(link).path).stem)
            for link in file_links
        ]

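        # e.g. (hypothetical link) ".../Docs/S2-2400123.zip" -> "S2-2400123"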
        # build the document list dataframe; metadata other than TDoc / URL / Type is unknown for custom listings
        df = pd.DataFrame()
        df["TDoc"] = file_names
        df["URL"] = file_links

        for col in ["Title", "For", "TDoc Status", "Agenda item description"]:
            df[col] = "Unknown"

        df["Type"] = "TDoc / xxxxCR"

        return GetMeetingDocsResponse(data=df.to_dict(orient="records"))

    else:
        # Walk the FTP directory listing to find the Excel sheet that lists all available documents for the meeting

        # Extracting WG
        working_group = req.working_group
        tsg = re.sub(r"\d+", "", working_group)
        wg_number = re.search(r"\d", working_group).group(0)
        url = "https://www.3gpp.org/ftp/tsg_" + tsg

        logging.info(
            f"Fetching TDocs dataframe for {working_group}:{req.meeting}")

        resp = await http_client.get(url)
        soup = BeautifulSoup(resp.text, "html.parser")
        wg_folders = [item.get_text() for item in soup.select("tr td a")]
        selected_folder = None
        for folder in wg_folders:
            if "wg" + str(wg_number) in folder.lower():
                selected_folder = folder
                break

        if selected_folder is None:
            raise HTTPException(
                status_code=404, detail=f"No FTP folder found for working group {working_group}")

        url += "/" + selected_folder + "/" + req.meeting + "/docs"
        resp = await http_client.get(url)
        soup = BeautifulSoup(resp.text, "html.parser")
        files = [item.get_text() for item in soup.select("tr td a")
                 if item.get_text().endswith(".xlsx")]

        if not files:
            raise HTTPException(
                status_code=404, detail="No Excel file was found")

        df = pd.read_excel(str(url + "/" + files[0]).replace("#", "%23"))
        filtered_df = df[df["Uploaded"].notna()][["TDoc", "Title", "CR category", "For", "Source",
                                                  "Type", "Agenda item", "Agenda item description", "TDoc Status"]]
        filtered_df["URL"] = filtered_df["TDoc"].apply(
            lambda tdoc: f"{url}/{tdoc}.zip")

        df = filtered_df.fillna("")
        return GetMeetingDocsResponse(data=df[["TDoc", "Title", "Type", "For", "TDoc Status", "Agenda item description", "URL"]].to_dict(orient="records"))

# ==================================================================================================================================


@router.post("/download_docs")
async def download_docs(req: DownloadDocsRequest, http_client: AsyncClient = Depends(get_http_client)) -> StreamingResponse:
    """Download the specified TDocs and zips them in a single archive"""

    # Document IDs to download
    document_ids = [doc.document for doc in req.documents]

    logging.info(f"Downloading TDocs: {document_ids}")

    # helper that normalizes agenda item names into filesystem-safe directory names
    def __normalize_for_path(text: str) -> str:
        if not text:
            return "_unspecified_agenda_item"
        text = re.sub(r'\s+', '_', text)
        text = re.sub(r'[^\w\s-]', '', text).strip()
        return text if text else "_unspecified_agenda_item"
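    # e.g. (illustrative) "5.3 Architecture (FS_XYZ)" -> "53_Architecture_FS_XYZ"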

    async def _process_single_document(item: DocInfo):
        """Attempts to convert a document to text and returns success status and content."""
        try:
            filename, ext, contents = await get_doc_archive(item.url, http_client)
            text_lines = await extract_text_contents(filename, ext, contents)
            content_bytes = "\n".join(text_lines).encode("utf-8")
            return {"doc_id": item.document, "content": content_bytes, "agenda_item": item.agenda_item}
        except Exception as e:
            logging.warning(
                f"Failed to process document '{item.document}' from URL '{item.url}': {e}")
            error_message = f"Document '{item.document}' text extraction failed: {e}".encode(
                "utf-8")
            return {"doc_id": item.document, "content": error_message, "failed": True, "agenda_item": item.agenda_item}

    results = await asyncio.gather(*[_process_single_document(doc) for doc in req.documents], return_exceptions=False)

    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, mode='w', compression=zipfile.ZIP_DEFLATED) as zip_file:
        for result in results:
            # get agenda item directory
            agenda_item_str = result.get("agenda_item") or ""
            directory_name = __normalize_for_path(agenda_item_str)

            failed = "failed" in result
            doc_id = result["doc_id"]
            base_filename = f"failed_{doc_id}.txt" if failed else f"{doc_id}.txt"

            # sort into agenda item directories if enabled
            full_file_path = f"{directory_name}/{base_filename}" if req.sort_by_agenda_item else base_filename
            zip_file.writestr(full_file_path, result["content"])

    zip_buffer.seek(0)

    return StreamingResponse(
        zip_buffer,
        media_type="application/zip",
        headers={"Content-Disposition": "attachment; filename=tdocs.zip"}
    )

# ======================================================================================================================================================================================


@router.post("/download_user_docs")
async def download_user_docs(files: list[UploadFile] = File(...)):
    """Freeform convert the user files into text and downloads them as a single zip file."""
    file_infos = []

    # retrieve all files
    for file in files:
        filename, ext = os.path.splitext(file.filename)
        file_infos.append({
            "filename": filename,
            "extension": ext,
            "content": io.BytesIO(await file.read())
        })

    filenames = [file["filename"] for file in file_infos]
    logging.info(f"Got {len(file_infos)} user files to convert.")
    logging.debug(f"Filenames: {filenames}")

    # convert files to text
    async def _process_single_document(item: dict):
        try:
            text_lines = await extract_text_contents(item["filename"], item["extension"], item["content"])
            content_bytes = "\n".join(text_lines).encode("utf-8")
            return {"doc_id": item["filename"], "content": content_bytes}
        except Exception as e:
            doc = item["filename"]
            logging.warning(
                f"Failed to process document '{doc}': {e}")
            error_message = f"Document '{doc}' text extraction failed: {e}".encode(
                "utf-8")
            return {"doc_id": doc, "content": error_message, "failed": True}

    results = await asyncio.gather(*[_process_single_document(file) for file in file_infos], return_exceptions=False)

    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, mode='w', compression=zipfile.ZIP_DEFLATED) as zip_file:
        for result in results:
            failed = "failed" in result
            doc_id = result["doc_id"]
            base_filename = f"failed_{doc_id}.txt" if failed else f"{doc_id}.txt"
            zip_file.writestr(base_filename, result["content"])

    zip_buffer.seek(0)

    return StreamingResponse(
        zip_buffer,
        media_type="application/zip",
        headers={"Content-Disposition": "attachment; filename=user_files.zip"}
    )

# ======================================================================================================================================================================================


class ProgressUpdate(BaseModel):
    """Defines the structure of a single SSE message."""
    status: Literal["progress", "complete"]
    data: dict
    total_docs: int
    processed_docs: int
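
# On the wire each update is serialized as a single SSE frame, e.g. (illustrative):
#   data: {"status": "progress", "data": {}, "total_docs": 3, "processed_docs": 1}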


@router.post("/extract_requirements/sse")
async def extract_requirements_from_docs(req: ExtractRequirementsRequest, llm_router: Router = Depends(get_llm_router), http_client: AsyncClient = Depends(get_http_client)):
    """Extract requirements from the specified xxxxCR docs using a LLM and returns SSE events about the progress of ongoing operations"""

    documents = req.documents
    n_docs = len(documents)

    logging.info(f"Generating requirements for documents: {req.documents}")

    # limit the concurrency of LLM requests to avoid tripping provider rate limits
    concurrency_sema = asyncio.Semaphore(4)

    def prompt(doc_id, full):
        return f"Here's the document whose ID is {doc_id} : {full}\n\nExtract all requirements and group them by context, returning a list of objects where each object includes a document ID, a concise description of the context where the requirements apply (not a chapter title or copied text), and a list of associated requirements; always return the result as a list, even if only one context is found. Remove the errors"

    async def _process_document(doc) -> list[DocRequirements]:
        doc_id = doc.document
        url = doc.url

        # convert the document to text for use in the prompt
        try:
            filename, ext, contents = await get_doc_archive(url, http_client)
            txt_data = await extract_text_contents(filename, ext, contents)
            full = "\n".join(txt_data)
        except Exception as e:
            fmt = "".join(traceback.format_exception(e))
            logging.error(f"Failed to process doc {doc_id} : {fmt}")
            return [DocRequirements(document=doc_id, context="Failed to process document", requirements=[])]

        # hold the semaphore only for the duration of the LLM call
        async with concurrency_sema:
            try:
                model_used = "gemini-v2"
                resp_ai = await llm_router.acompletion(
                    model=model_used,
                    messages=[
                        {"role": "user", "content": prompt(doc_id, full)}],
                    response_format=ExtractRequirementsResponse
                )
                return ExtractRequirementsResponse.model_validate_json(resp_ai.choices[0].message.content).requirements
            except Exception as e:
                logging.error(f"LLM extraction failed for doc {doc_id}: {e}")
                return [DocRequirements(document=doc_id, context="Error LLM", requirements=[])]

    # coroutines that process each document
    process_coros = [_process_document(doc) for doc in documents]

    # formats a ProgressUpdate as a single SSE "data:" frame
    def progress_update(x: ProgressUpdate) -> str:
        return f"data: {x.model_dump_json()}\n\n"

    # async generator that emits the SSE progress events
    async def _stream_generator(docs):
        items = []
        n_processed = 0

        yield progress_update(ProgressUpdate(status="progress", data={}, total_docs=n_docs, processed_docs=0))

        for doc in asyncio.as_completed(docs):
            result = await doc
            items.extend(result)
            n_processed += 1
            yield progress_update(ProgressUpdate(status="progress", data={}, total_docs=n_docs, processed_docs=n_processed))

        # build the final payload once, after all documents have been processed
        final_response = ExtractRequirementsResponse(requirements=items)

        yield progress_update(ProgressUpdate(status="complete", data=final_response.model_dump(), total_docs=n_docs, processed_docs=n_processed))

    return StreamingResponse(_stream_generator(process_coros), media_type="text/event-stream")