# abnormal_alignment.py
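# Heuristic check for abnormal text alignment on the front side of the
# new-style ID card ("idnew_front"): the left edge of the card-number ('no')
# box is compared against a reference word box from the OCR "type" line,
# after correcting for image tilt.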

import requests
from utils.constants import HELPER_API
from utils.common import BaseClassifier, get_blurry_score
import cv2
import numpy as np
import math
from PIL import Image, ImageDraw

def get_align_diff(img, can, dan, so):
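    """Return (raw_x_offset, rotation_correction) between the `so` box and the
    `can` reference box.

    raw_x_offset is the difference of the boxes' left x-coordinates as
    detected; rotation_correction is how much that offset changes after
    de-skewing the image by the tilt of the line running from `can` to `dan`.
    """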
    if dan[1][0] - can[1][0] == 0:
        # Degenerate case: no tilt angle can be estimated, so no correction.
        return so[0][0] - can[0][0], 0

    # Tilt of the reference line, from the second corners of `can` and `dan`
    # (image coordinates, y pointing down).
    angle = math.atan((dan[1][1] - can[1][1]) / (dan[1][0] - can[1][0]))
    (h, w) = img.shape[:2]
    center = (w // 2, h // 2)
    # Rotation that de-skews the image about its center.
    M = cv2.getRotationMatrix2D(center, -math.degrees(angle), 1.0)

    # Rotate both boxes and compare their left edges before and after.
    rotatedpolygon1 = cv2.transform(np.asarray(can).reshape((2, 1, 2)), M)
    rotatedpolygon3 = cv2.transform(np.asarray(so).reshape((2, 1, 2)), M)
    ori = so[0][0] - can[0][0]

    return ori, rotatedpolygon3[0][0][0] - rotatedpolygon1[0][0][0] - ori

def get_x(img, is_three=False):
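    """Estimate the x-offset of the text inside a cropped field image.

    The crop is Otsu-thresholded and the text merged into blobs with a
    dilate/erode pass; the function returns the left x of the bounding box of
    the chosen blob (the largest one, or the left-most of the largest two or
    three when is_three=True), or 0 if no contour is found.
    """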
    img_tmp = img.copy()
    # Binarise the crop with Otsu's threshold.
    gray = cv2.cvtColor(img_tmp, cv2.COLOR_BGR2GRAY)
    ret3, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    h = thresh.shape[0]
    # Invert so the (dark) text becomes foreground, then dilate/erode with
    # kernels scaled to the crop height to merge characters into blobs.
    kernel1 = np.ones((int(h / 7) + 1, int(h / 7) + 1), 'uint8')
    kernel2 = np.ones((int(h / 7), int(h / 7)), 'uint8')
    new_thresh = cv2.dilate(~thresh, kernel1)
    new_thresh = cv2.erode(new_thresh, kernel2)

    contours, hierarchy = cv2.findContours(new_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        return 0

    if is_three:
        # Among the largest two or three contours (by area), keep the left-most
        # one: its minimum x marks where the field's text actually starts.
        contours = sorted(contours, key=cv2.contourArea, reverse=True)
        min_x = [np.asarray(c).min(axis=0)[0][0] for c in contours[:3]]
        cnt = contours[0]
        if len(contours) == 2:
            if min_x[0] >= min_x[1]:
                cnt = contours[1]
        elif len(contours) >= 3:
            if min_x[0] >= min_x[1] and min_x[2] >= min_x[1]:
                cnt = contours[1]
            elif min_x[0] >= min_x[2] and min_x[1] >= min_x[2]:
                cnt = contours[2]
    else:
        cnt = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(cnt)
    return x

def align_check(img, ocr_bbs, attr_bbs):
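    """Estimate the horizontal misalignment between the card-number box and the
    OCR "type" line, as a percentage of the image width.

    ocr_bbs supplies the word boxes of the "type" line; attr_bbs supplies the
    attribute boxes ('no', 'full_name', 'place_of_residence'). Returns None
    when a required crop is empty or too blurry to measure.
    """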
    # Debug output.
    print(ocr_bbs)
    print(attr_bbs)
    print(img.shape)
    h, w = img.shape[:2]
    # Attribute boxes come in as flat [x1, y1, x2, y2]; reshape to corner pairs.
    name = np.asarray(attr_bbs['full_name']).reshape((2, 2)).tolist()
    resi = np.asarray(attr_bbs['place_of_residence']).reshape((2, 2)).tolist()

    img_name = img[name[0][1]:name[1][1], name[0][0]:name[1][0]]
    img_resi = img[resi[0][1]:resi[1][1], resi[0][0]:resi[1][0]]
    # `can`: the word box four positions from the end of the "type" line, used
    # as the horizontal reference; `so`: the card-number ('no') attribute box.
    first_type = ocr_bbs["type"][max(len(ocr_bbs["type"]) - 4, 0)]
    can = np.asarray(first_type).reshape((2, 2)).astype(int).tolist()
    so = np.asarray(attr_bbs['no']).reshape((2, 2)).tolist()

    img_so = img[so[0][1]:so[1][1], so[0][0]:so[1][0]]
    img_can = img[can[0][1]:can[1][1], can[0][0]:can[1][0]]

    if img_so.shape[0] == 0 or img_so.shape[1] == 0:
        return None
    else:
        add_so_x = get_x(img_so)
    if img_can.shape[0] == 0 or img_can.shape[1] == 0:
        return None
    else:
        add_can_x = get_x(img_can, is_three=True)

    img_so_blur = get_blurry_score(img_so)
    img_can_blur = get_blurry_score(img_can)

    # Skip the check when either reference crop is too blurry to measure reliably.
    if min(img_can_blur, img_so_blur) < 0.7:
        return None

    if img_name.shape[0] == 0 or img_name.shape[1] == 0:
        return None
    if img_resi.shape[0] == 0 or img_resi.shape[1] == 0:
        return None

    add_name_x = get_x(img_name, is_three=True)
    add_resi_x = get_x(img_resi, is_three=True)

    ori, add_align = get_align_diff(
        img,
        can=np.asarray(first_type).reshape((2, 2)).astype(int).tolist(),
        dan=np.asarray(ocr_bbs["type"][-1]).reshape((2, 2)).astype(int).tolist(),
        so=np.asarray(attr_bbs['no']).reshape((2, 2)).tolist(),
    )

    # Estimate the tilt from the name / place-of-residence boxes (x shift per
    # unit of y) and project it onto the vertical gap between `so` and `can`
    # (add_nghieng_y: tilt correction; "nghieng" ~ skew).
    x_diff = name[0][0] - resi[0][0] - add_name_x + add_resi_x
    y_diff = name[0][1] - resi[0][1]
    add_nghieng_y = int(((so[0][1] + so[1][1]) / 2 - (can[0][1] + can[1][1]) / 2) * x_diff / y_diff) if y_diff != 0 else 0

    # Raw left-edge offset, adjusted by where the text starts inside each crop,
    # then corrected by the average of the two tilt estimates.
    res = ori - add_can_x + add_so_x
    res = (res + 0.5) - (add_align + add_nghieng_y) / 2

    # Debug output.
    print(ori)
    print(add_align)
    print(add_can_x)
    print(add_so_x)
    print(add_nghieng_y)
    print(res)
    # Normalise to a percentage of the image width.
    return float(res / w * 100)


class AbnormalAlignmenModel(BaseClassifier):    
    def predict(self, image_data, ocr_front, blur_score, card_type):                
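        """Run the abnormal-alignment check on a front-side image.

        image_data may be a file path or a decoded BGR array; ocr_front must
        contain the OCR field boxes under ["data"]["field_bbs"]. The check only
        applies to "idnew_front" cards with blur_score >= 5; otherwise a
        negative result with score 0 is returned.
        """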
        results = {
                "message": "success",
                "data": {}
        }

        if card_type != "idnew_front" or blur_score < 5:
            results["data"] = {
                "score": 0,
                "prediction": False
            }

            return results
                    
        if isinstance(image_data, str):
            # image_data is a file path: post the file to the attribute-text detector.
            with open(image_data, "rb") as f:
                r = requests.post(f"{HELPER_API}/idcard/attribute-text/detect", files={"image_file": f})
            img = cv2.imread(image_data)
        else:
            # image_data is already a decoded BGR array: re-encode it as JPEG
            # before posting it to the detector.
            img = image_data
            success, encoded_image = cv2.imencode('.jpg', image_data)
            content = encoded_image.tobytes()
            r = requests.post(f"{HELPER_API}/idcard/attribute-text/detect", files={"image_file": content})
            
        attr_boxes = r.json()["data"]["prediction"]

        # Save a debug visualisation of the detected attribute boxes
        # (convert BGR -> RGB for PIL before drawing).
        pil_image = Image.fromarray(img[:, :, ::-1])
        draw = ImageDraw.Draw(pil_image)
        for box in attr_boxes.values():
            left, top, right, bottom = box
            draw.rectangle([left, top, right, bottom], outline=(0, 255, 0), width=2)

        pil_image.save("uploaded/attr-image.jpg")
        score = align_check(img, ocr_front["data"]["field_bbs"], attr_boxes)
        if score is None:
            # Alignment could not be measured (empty or too-blurry crops).
            results["data"] = {"score": 0, "prediction": False}
            return results
        results["data"] = {
            "prediction": score < -0.5,  # candidate thresholds: -1.0, -0.75, -0.5, -0.4, -0.3
            "score": score
        }
        return results
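

# Example usage (a sketch; BaseClassifier's constructor and the exact layout of
# the ocr_front dict are assumptions, not defined in this file):
#
#     model = AbnormalAlignmenModel()
#     img = cv2.imread("uploaded/front.jpg")
#     result = model.predict(img, ocr_front, blur_score=10.0, card_type="idnew_front")
#     print(result["data"]["score"], result["data"]["prediction"])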