# Pastebin metadata (not Python code — commented out so the file parses):
# Untitled | user_2381398 | plain_text | 2 years ago | 2.4 kB | 5 views | Indexable
# Import packages
import time
import cv2
import numpy as np
import ddddocr
import pyautogui
from io import BytesIO
import pyscreenshot
import pyscreenshot as ImgCap
# --- One-time setup --------------------------------------------------
# Grayscale template of the captcha box, used to locate it on screen.
template = cv2.imread('captcha_box.png', 0)  # replace with your template
h, w = template.shape  # grayscale image: shape is (rows, cols) = (h, w)

# OCR engine, created once and reused on every loop iteration.
cls_ocr = ddddocr.DdddOcr()
# --- Main capture / OCR / typing loop --------------------------------
# Runs forever: screenshot -> locate the captcha box via template
# matching -> crop the answer area -> OCR -> type the result + Enter.
while True:  # infinite loop; replace True with a stop condition if needed
    # Capture the full screen, then reload it as grayscale for matching.
    img = pyscreenshot.grab()
    img.save('screenshot.png')
    screenshot = cv2.imread('screenshot.png', 0)

    # Template matching: score every position against the captcha box.
    res = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8  # adjust this value based on your requirement
    loc = np.where(res >= threshold)

    if np.any(loc[0]):  # at least one match found
        # NOTE(review): neighbouring pixels above the threshold all count
        # as matches, so one captcha can be processed several times here —
        # consider breaking after the first hit if that is not intended.
        for pt in zip(*loc[::-1]):  # pt = (x, y) of each matching location
            # Offset from the matched corner to the captcha text area,
            # and the size of that area — tune for your layout.
            offset_x = 60
            offset_y = 45
            new_w = 260
            new_h = 85

            # Crop the captcha text region, positioned relative to the match.
            bbox = (pt[0] + offset_x, pt[1] + offset_y,
                    pt[0] + offset_x + new_w, pt[1] + offset_y + new_h)
            cls_img = pyscreenshot.grab(bbox=bbox)
            # Kept for debugging; note it overwrites the full screenshot file.
            cls_img.save('screenshot.png')

            try:
                # ddddocr expects raw PNG bytes, so serialize the PIL image.
                buf = BytesIO()
                cls_img.save(buf, format='PNG')
                ret = cls_ocr.classification(buf.getvalue())  # OCR result
                print(f"ret = {ret}")  # debug output

                time.sleep(0.5)  # avoid 'Watch Dog' problem
                pyautogui.typewrite(ret)  # type out the result
                time.sleep(0.5)  # simulate the interval of human typing
                pyautogui.press('enter')  # submit with Enter
            except Exception as e:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit can still stop the script; report the cause
                # instead of hiding it.
                print(f"An exception occurred: {e}")

    # Pause between screen polls.
    time.sleep(3)
# (pastebin footer residue: "Editor is loading...")