| """ |
| Microsoft Azure Document Intelligence layout inference. |
| |
| Uses Azure Document Intelligence (Form Recognizer) API for document analysis. |
| """ |
| import asyncio |
| import os |
|
|
| from azure.ai.formrecognizer import DocumentAnalysisClient |
| from azure.core.credentials import AzureKeyCredential |
|
|
| from base import BaseInference, create_argument_parser, parse_args_with_extra |
|
|
# Map Azure Document Intelligence paragraph roles to the project's standard
# layout categories. Unknown or absent roles fall back to "paragraph" at the
# call site (``CATEGORY_MAP.get(role, "paragraph")``).
#
# NOTE: the azure-ai-formrecognizer SDK reports roles in camelCase
# ("title", "sectionHeading", "pageHeader", ...), so those spellings are the
# ones that actually match at runtime. The PascalCase keys are kept for
# backward compatibility with any pre-recorded responses that used them.
CATEGORY_MAP = {
    # camelCase role values as documented for the prebuilt-layout model
    "title": "heading1",
    "sectionHeading": "heading1",
    "footnote": "footnote",
    "pageHeader": "header",
    "pageFooter": "footer",
    "pageNumber": "paragraph",
    # legacy PascalCase spellings (kept for backward compatibility)
    "Title": "heading1",
    "SectionHeading": "heading1",
    "PageHeader": "header",
    "PageFooter": "footer",
    "Paragraph": "paragraph",
    "Subheading": "heading1",
    "SectionMarks": "paragraph",
    "PageNumber": "paragraph",
}
|
|
|
|
class MicrosoftInference(BaseInference):
    """Microsoft Azure Document Intelligence layout inference.

    Sends documents to Azure's ``prebuilt-layout`` model and converts the raw
    ``to_dict()`` analysis result into the project's standard element format
    (paragraph and table elements with coordinates and content).
    """

    def __init__(
        self,
        save_path,
        input_formats=None,
        concurrent_limit=None,
        sampling_rate=1.0,
        request_timeout=600,
        random_seed=None,
        group_by_document=False,
        file_ext_mapping=None
    ):
        """Initialize the MicrosoftInference class.

        Args:
            save_path (str): the json path to save the results
            input_formats (list, optional): the supported file formats.
            concurrent_limit (int, optional): maximum number of concurrent API requests
            sampling_rate (float, optional): fraction of files to process (0.0-1.0)
            request_timeout (float, optional): timeout in seconds for API requests
            random_seed (int, optional): random seed for reproducible sampling
            group_by_document (bool, optional): group per-page results into document-level
            file_ext_mapping (str or dict, optional): file extension mapping for grouping

        Raises:
            ValueError: if the MICROSOFT_API_KEY or MICROSOFT_ENDPOINT
                environment variables are not set (or are empty).
        """
        super().__init__(
            save_path,
            input_formats,
            concurrent_limit,
            sampling_rate,
            request_timeout,
            random_seed,
            group_by_document,
            file_ext_mapping
        )

        # Credentials come from the environment; empty strings count as unset.
        api_key = os.getenv("MICROSOFT_API_KEY") or ""
        endpoint = os.getenv("MICROSOFT_ENDPOINT") or ""

        if not all([api_key, endpoint]):
            raise ValueError("Please set the environment variables for Microsoft")

        self.document_analysis_client = DocumentAnalysisClient(
            endpoint=endpoint,
            credential=AzureKeyCredential(api_key)
        )

    @staticmethod
    def _region_to_coordinates(bounding_regions):
        """Convert the first bounding region's polygon to [{"x": .., "y": ..}]."""
        return [
            {"x": pt["x"], "y": pt["y"]}
            for pt in bounding_regions[0]["polygon"]
        ]

    @staticmethod
    def _table_to_html(table_elem):
        """Render a single Azure table element as an HTML ``<table>`` string.

        A spanned cell emits one ``<td>`` with ``rowspan``/``colspan`` at its
        anchor position; the positions it covers are skipped in the output.

        NOTE(review): cell content is inserted into the HTML without escaping;
        content containing ``<``/``>``/``&`` would produce malformed HTML.
        Kept as-is to preserve the existing output format.
        """
        # None marks a grid position covered by a span; "" marks an empty slot
        # (which still emits nothing, but is distinct from "covered").
        table_matrix = [
            ["" for _ in range(table_elem["column_count"])]
            for _ in range(table_elem["row_count"])
        ]

        for cell in table_elem["cells"]:
            row = cell["row_index"]
            col = cell["column_index"]
            rowspan = cell.get("row_span", 1)
            colspan = cell.get("column_span", 1)
            content = cell["content"]

            for r in range(row, row + rowspan):
                for c in range(col, col + colspan):
                    if r == row and c == col:
                        table_matrix[r][c] = (
                            f"<td rowspan='{rowspan}' colspan='{colspan}'>{content}</td>"
                        )
                    else:
                        table_matrix[r][c] = None

        html = "<table>"
        for row_cells in table_matrix:
            html += "<tr>"
            for cell_html in row_cells:
                if cell_html is not None:
                    html += cell_html
            html += "</tr>"
        html += "</table>"
        return html

    def post_process(self, data):
        """Post-process Microsoft Document Intelligence API response to standard format.

        Args:
            data (dict): mapping of input key -> raw ``to_dict()`` analysis
                result (with ``paragraphs`` and ``tables`` lists).

        Returns:
            The output of ``self._merge_processed_data`` over the per-input
            ``{"elements": [...]}`` dictionaries.
        """
        processed_dict = {}
        for input_key, output_data in data.items():
            elements = []

            id_counter = 0
            # Paragraph-level elements; a missing/None role maps to "paragraph".
            for par_elem in output_data.get("paragraphs") or []:
                category = CATEGORY_MAP.get(par_elem.get("role"), "paragraph")
                elements.append({
                    "coordinates": self._region_to_coordinates(par_elem["bounding_regions"]),
                    "category": category,
                    "id": id_counter,
                    "content": {"text": par_elem["content"], "html": "", "markdown": ""}
                })
                id_counter += 1

            # Table elements. The HTML is built fresh for each table so one
            # table's output never contains a previous table's markup.
            for table_elem in output_data.get("tables") or []:
                elements.append({
                    "coordinates": self._region_to_coordinates(table_elem["bounding_regions"]),
                    "category": "table",
                    "id": id_counter,
                    "content": {
                        "text": "",
                        "html": self._table_to_html(table_elem),
                        "markdown": ""
                    }
                })
                id_counter += 1

            processed_dict[input_key] = {"elements": elements}

        return self._merge_processed_data(processed_dict)

    def _analyze_document(self, filepath):
        """Analyze a document with the Azure ``prebuilt-layout`` model.

        Blocks until the long-running operation completes and returns the
        result as a plain dict (``result.to_dict()``).
        """
        with open(filepath, "rb") as input_data:
            poller = self.document_analysis_client.begin_analyze_document(
                "prebuilt-layout", document=input_data
            )
            result = poller.result()
        return result.to_dict()

    async def _call_api_async(self, filepath, *args, **kwargs):
        """Make the actual async API call for a file.

        Runs the blocking SDK call in the default executor so the event loop
        is not blocked while Azure processes the document.
        """
        # get_running_loop() is the documented way to obtain the loop from
        # inside a coroutine (get_event_loop() is deprecated for this use).
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self._analyze_document, filepath)

    def _call_api_sync(self, filepath, *args, **kwargs):
        """Make the actual sync API call for a file."""
        return self._analyze_document(filepath)
|
|
|
|
if __name__ == "__main__":
    # Build the shared CLI for inference backends and run the Microsoft one.
    arg_parser = create_argument_parser("Microsoft Azure Document Intelligence layout inference")
    cli_args = parse_args_with_extra(arg_parser)

    inference = MicrosoftInference(
        cli_args.save_path,
        input_formats=cli_args.input_formats,
        concurrent_limit=cli_args.concurrent,
        sampling_rate=cli_args.sampling_rate,
        request_timeout=cli_args.request_timeout,
        random_seed=cli_args.random_seed,
        group_by_document=cli_args.group_by_document,
        file_ext_mapping=cli_args.file_ext_mapping,
    )
    inference.infer(cli_args.data_path)
|
|