from typing import Any, Dict
import base64
from io import BytesIO

import torch
from PIL import Image

from model import Model

# set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != "cuda":
    raise ValueError("need to run on GPU")


class EndpointHandler:
    def __init__(self, path=""):
        # load the optimized model
        self.model = Model()

    def __call__(self, data: Dict[str, Any]) -> Any:
        """
        Args:
            data (:obj:`dict`): includes the input data and the parameters for the inference.
        Return:
            A :obj:`dict` with the base64-encoded output image.
        """
        inputs = data.pop("inputs", data)
        # decode the base64-encoded input image into a PIL Image
        image = Image.open(BytesIO(base64.b64decode(inputs["image"])))

        # run inference pipeline
        _, res = self.model.process_lineart(image)

        # encoding the result image as base64 is done by the default toolkit
        return res
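
# --- Usage sketch (illustrative, not part of the deployed handler) ---
# A minimal local test of the handler, mirroring the payload shape the
# Inference Endpoints toolkit would send. Assumes a CUDA GPU is available
# (the module-level check above raises otherwise) and that an input file
# named "input.png" exists; the filename is a hypothetical placeholder.
if __name__ == "__main__":
    handler = EndpointHandler()

    # build the request payload: a base64-encoded image under "inputs.image"
    with open("input.png", "rb") as f:
        payload = {"inputs": {"image": base64.b64encode(f.read()).decode("utf-8")}}

    # invoke the handler as the toolkit would; the raw result is returned,
    # since base64 encoding of the output is left to the default toolkit
    result = handler(payload)
    print(type(result))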