Untitled

 avatar
wyc1230
plain_text
17 days ago
3.5 kB
6
Indexable
import time
from contextlib import contextmanager
from datetime import datetime

from prometheus_client import Histogram

from metrics import algorithm_latency  # Importing the shared latency metric

# Define new histograms for major steps in `run()`
# Histogram of per-step wall-clock durations, partitioned by step name,
# algorithm, and deployment mode. Bucket edges span 100 ms up to 60 s.
run_step_latency = Histogram(
    "run_step_latency_seconds",
    "Latency of major steps in the run() method",
    labelnames=("step", "algorithm", "mode"),
    buckets=(0.1, 0.5, 1, 2, 3, 5, 10, 15, 20, 30, 60),
)

class ModelRunner:
    """Run the picking-for-item-groups model for a single request, recording
    per-step latency metrics.

    Each major phase of :meth:`run` is timed and reported through the
    module-level ``run_step_latency`` histogram, labelled with the step name,
    the algorithm name, and the deployment mode.
    """

    def __init__(self, req, s2r_config, app_config, algorithm_parameters, logger, env):
        # req: incoming picking request; provides req.context.centerId.
        self.req = req
        # s2r_config: per-center config mapping, keyed by center id as a string.
        self.s2r_config = s2r_config
        # app_config: application-wide config dict; center config overrides it.
        self.app_config = app_config
        # algorithm_parameters: dict of solver parameters; "name" selects the algorithm.
        self.algorithm_parameters = algorithm_parameters
        self.logger = logger
        # env: runtime environment; env.mode is used as a metric label.
        self.env = env

    @contextmanager
    def _timed(self, step: str, algorithm: str):
        """Record the wall-clock duration of the managed body in
        ``run_step_latency`` under the given step/algorithm labels.

        NOTE: deliberately no try/finally — if the body raises, no sample is
        observed, preserving the previous inline-timing behavior.
        """
        # perf_counter is monotonic, so durations are immune to wall-clock
        # adjustments (datetime.now() deltas are not).
        start = time.perf_counter()
        yield
        run_step_latency.labels(step=step, algorithm=algorithm, mode=self.env.mode).observe(
            time.perf_counter() - start
        )

    def run(self) -> "PickingForItemGroupsResponse":
        """
        Run the model to process the request and generate a response.

        :return: PickingForItemGroupsResponse containing the processed collate groups.
        :raises ValueError: if no config exists for the request's center id.
        """
        algorithm = self.algorithm_parameters.get("name", "unknown")

        with self._timed("context_init", algorithm):
            ctx = PickingForItemGroupsContext(
                center_id=self.req.context.centerId,
                group_id="MOCK_GROUP",
                timestamp=int(datetime.now().timestamp() * 1000),
            )

        with self._timed("config_retrieval", algorithm):
            center_config = self.s2r_config.get(str(ctx.center_id))
            if center_config is None:
                raise ValueError(f"Can't find config file for center: {ctx.center_id}")

        with self._timed("processor_init", algorithm):
            # Center-specific settings take precedence over application-wide ones.
            processor = PickRequestProcessor(config={**self.app_config, **center_config}, ctx=ctx)
            processor.init_with_request(request=self.req)

        with self._timed("prepare_model_input", algorithm):
            processor.prepare_model_input()

        with self._timed("solve_model", algorithm):
            # BUG FIX: algorithm_parameters is a plain dict (see .get() above);
            # dicts have no __dict__, so the old `.__dict__` access raised
            # AttributeError. Log the dict itself, lazily formatted.
            self.logger.info("Passing algorithm parameter: %s to the model.", self.algorithm_parameters)
            result = processor.solve_stream_to_rebin(self.algorithm_parameters)

        with self._timed("wire_response", algorithm):
            response = self.wire_response(result)

        return response
Editor is loading...
Leave a Comment