import os
import shutil
import tensorflow as tf
from tensorflow.data import AUTOTUNE
import mlflow
import numpy as np
import pandas as pd
from deep_breath_ms.model.models import create_model
from deep_breath_ms.model.config import PARAMS, OPTIONS
from deep_breath_ms.model.utils import Utils
def make_inference(model_path, dataset_root):
    """Run batch inference over a dataset folder and save per-sample predictions."""
    inference_json = Utils.read_json(f"{dataset_root}{os.sep}inference.json")
    steps_per_epoch = len(inference_json) // PARAMS["batch_size"]
    metadata = Utils.read_json(f"{dataset_root}{os.sep}metadata.json")
    model = create_model(
        model_name=OPTIONS["model_arch"],
        input_shape=PARAMS["dim"],
        num_classes=metadata["num_classes"],
        decay_steps=steps_per_epoch,
        l_rate=OPTIONS["l_rate"],
    )
    model.load_weights(model_path)
    inference_dataset = Utils.create_inference_dataset(
        inference_json, PARAMS["batch_size"], shuffle=False
    )
    pred = model.predict(inference_dataset)
    # One row per sample: name, true label, class probabilities,
    # predicted class index, and the winning class probability.
    data = []
    results_dir = f"{dataset_root}{os.sep}InferenceResults"
    if os.path.exists(results_dir):
        shutil.rmtree(results_dir)  # start from a clean results folder
    os.makedirs(results_dir)
    for i, (k, v) in enumerate(inference_json.items()):
        data.append([k, v, pred[i][0], pred[i][1], np.argmax(pred[i]), pred[i].max()])
    df_predictions = pd.DataFrame(
        data,
        columns=[
            "Name",
            "Label (1: CN, 0: CP)",
            "CP prob",
            "CN prob",
            "Predicted label",
            "Confidence",
        ],
    )
    df_predictions.to_excel(f"{results_dir}{os.sep}predictions.xlsx")
    return df_predictions
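
# Example usage (a minimal sketch; "runs/best_weights.h5" and "datasets/cohort_a"
# are hypothetical paths, and the dataset root is assumed to contain the
# inference.json and metadata.json files read above):
#
#     df = make_inference("runs/best_weights.h5", "datasets/cohort_a")
#     print(df["Predicted label"].value_counts())
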
def make_inference_from_pkls(pkl_folder, config, mlflow_id=None):
    """Run inference on pickled samples using a model logged in MLflow."""
    data_params = config.get("data_params")
    batch_size = data_params.get("batch_size", PARAMS["batch_size"])
    # Build the inference dataset from the folder of pickled samples.
    inference_dataset = Utils.create_inference_dataset_from_pkls(
        pkl_folder, batch_size, shuffle=False
    )
    if mlflow_id is None:
        raise ValueError("An MLflow model URI such as 'runs:/<run_id>/model' is required")
    # Load the logged model as a generic PyFuncModel and predict.
    loaded_model = mlflow.pyfunc.load_model(mlflow_id)
    predictions = loaded_model.predict(inference_dataset)
    return predictions
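
# Example usage (a sketch; the folder and the model URI are placeholders, and
# the config dict only needs the keys read above):
#
#     config = {"data_params": {"batch_size": 16}}
#     preds = make_inference_from_pkls("pkls/", config, mlflow_id="runs:/<run_id>/model")
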
def make_single_inference(model_path, data, num_classes=None):
    """Run inference on a single sample and return class probabilities as a list."""
    # decay_steps only parameterizes the LR schedule, which is irrelevant at
    # inference time, but create_model still expects a positive value.
    steps_per_epoch = max(len(data) // PARAMS["batch_size"], 1)
    model = create_model(
        model_name=OPTIONS["model_arch"],
        input_shape=PARAMS["dim"],
        num_classes=num_classes or PARAMS["num_classes"],
        decay_steps=steps_per_epoch,
        l_rate=OPTIONS["l_rate"],
    )
    model.load_weights(model_path)
    # Add a trailing channel axis and wrap the single sample in a dataset.
    dp = np.expand_dims(data, axis=2)
    dp = tf.constant(dp)
    dataset = (
        tf.data.Dataset.from_tensors(dp)
        .batch(PARAMS["batch_size"])
        .prefetch(AUTOTUNE)
    )
    pred = model.predict(dataset)
    return pred.tolist()
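
# Example usage (a sketch; "weights.h5" is a hypothetical checkpoint, and the
# zero array assumes PARAMS["dim"] ends with the channel axis that
# np.expand_dims(..., axis=2) restores):
#
#     sample = np.zeros(PARAMS["dim"][:2])
#     probs = make_single_inference("weights.h5", sample)
#     print(probs)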