Untitled
unknown
plain_text
2 years ago
1.7 kB
10
Indexable
def evaluate_model(image_features, model_features):
    """Evaluate retrieval quality of gallery (model) features against query (image) features.

    For every query, all gallery entries are ranked by ascending cosine
    distance; standard shape/image-retrieval metrics are accumulated.

    Parameters
    ----------
    image_features : sequence of (torch.Tensor, hashable)
        Query (feature, class-label) pairs.  Must support ``len()``.
    model_features : sequence of (torch.Tensor, hashable)
        Gallery (feature, class-label) pairs to retrieve from.

    Returns
    -------
    dict
        Keys "mAP", "First Tier", "Second Tier", "MRR", "DCG".
        mAP is delegated to the project helper ``compute_map``; per-query
        DCG comes from ``dcg_at_k`` and is averaged with ``np.mean``.

    Raises
    ------
    ValueError
        If either input is empty, or a query's class has no gallery
        entries (previously these surfaced as ZeroDivisionError /
        KeyError respectively).

    Notes
    -----
    ``torch.cosine_similarity`` is called with its default ``dim=1`` —
    assumes features are at-least-2-D tensors (e.g. shape (1, D));
    TODO confirm against the feature extractor.
    """
    from collections import Counter

    if not image_features or not model_features:
        raise ValueError("image_features and model_features must be non-empty")

    mAP = compute_map(image_features, model_features)

    # Gallery entries per class: defines the First Tier cutoff C and the
    # Second Tier cutoff 2C for each query class.
    classwise_model_count = Counter(model_class for _, model_class in model_features)

    ft = st = mrr = 0.0
    dcg_scores = []
    for image_feature, image_class in image_features:
        class_count = classwise_model_count.get(image_class, 0)
        if class_count == 0:
            # Tier metrics are undefined when the class is absent from the gallery.
            raise ValueError(f"query class {image_class!r} has no gallery entries")

        # Rank the whole gallery by cosine distance (1 - similarity), ascending.
        distances = [
            (1 - torch.cosine_similarity(image_feature, model_feature).item(), model_class)
            for model_feature, model_class in model_features
        ]
        # Sort on distance only (stable), matching the original tie behavior.
        distances.sort(key=lambda pair: pair[0])
        ranked_classes = [model_class for _, model_class in distances]

        # First Tier: fraction of same-class items within the top C results;
        # Second Tier: same numerator over the top 2C, still normalized by C.
        ft += sum(1 for c in ranked_classes[:class_count] if c == image_class) / class_count
        st += sum(1 for c in ranked_classes[:2 * class_count] if c == image_class) / class_count

        # Reciprocal rank of the first correct match.
        for rank, c in enumerate(ranked_classes, 1):
            if c == image_class:
                mrr += 1.0 / rank
                break

        # Binary relevance over the full ranking, scored by the project's dcg_at_k.
        rel_scores = [1 if c == image_class else 0 for c in ranked_classes]
        dcg_scores.append(dcg_at_k(rel_scores, len(rel_scores)))

    n_queries = len(image_features)
    return {
        "mAP": mAP,
        "First Tier": ft / n_queries,
        "Second Tier": st / n_queries,
        "MRR": mrr / n_queries,
        "DCG": np.mean(dcg_scores),
    }
Editor is loading...
Leave a Comment