# PickingForItemGroups model runner — `run` entry point (pasted snippet; original title unknown)
def run(self) -> PickingForItemGroupsResponse:
    """
    Run the model to process the request and generate a response.

    Builds a PickingForItemGroupsContext from the request, merges the
    application config with the center-specific config, executes the
    stream-to-rebin solve, and wires the result into a response while
    recording a latency metric for each step.

    :return: PickingForItemGroupsResponse containing the processed collate groups.
    :raises ValueError: if the required PROCESS_PATH_GROUP_ID parameter is
        absent, or no config exists for the request's center.
    """
    group_id = self.req.context.parameters.get("PROCESS_PATH_GROUP_ID")
    if group_id is None:
        # Previously a missing parameter was silently stringified to "None"
        # and propagated as the group id; fail fast instead.
        raise ValueError("Missing required parameter: PROCESS_PATH_GROUP_ID")

    ctx = PickingForItemGroupsContext(
        center_id=self.req.context.centerId,
        group_id=str(group_id),
        timestamp=int(datetime.now().timestamp() * 1000),  # epoch milliseconds
        algorithm=self.algorithm_parameters.ALGORITHM,
    )

    center_config = self.s2r_config.get(str(ctx.center_id))
    if center_config is None:
        raise ValueError(f"Can't find config file for center: {ctx.center_id}")

    def _record(step_name: str) -> None:
        # Emit a latency metric for the step that just finished; the stop
        # watch records the elapsed time under the step's name.
        record_step_latency(
            step_name,
            ctx.algorithm,
            ctx.center_id,
            ctx.group_id,
            ctx.stop_watch.record(step_name),
        )

    # Center config overrides app config on key collisions.
    processor = PickRequestProcessor(config={**self.app_config, **center_config}, ctx=ctx)
    processor.init_with_request(request=self.req)
    processor.prepare_model_input()
    # Lazy %-args: formatting only happens if INFO is enabled.
    self.logger.info("Passing algorithm parameter: %s to the model.", self.algorithm_parameters.__dict__)
    _record("Model pre-processing")

    result = processor.solve_stream_to_rebin(self.algorithm_parameters)
    _record("Model execution")

    response = self.wire_response(result)
    _record("Model post-processing")

    # Print out the latency breakdown.
    self.logger.info("[Tetris Model Latency Breakdown]%s", ctx.stop_watch)
    record_step_latency("Overall", ctx.algorithm, ctx.center_id, ctx.group_id, ctx.stop_watch.duration())
    return response
# (end of pasted snippet)