diff --git a/Framework/Built_In_Automation/Desktop/CrossPlatform/BuiltInFunctions.py b/Framework/Built_In_Automation/Desktop/CrossPlatform/BuiltInFunctions.py index 13df5293d..005bc91fb 100644 --- a/Framework/Built_In_Automation/Desktop/CrossPlatform/BuiltInFunctions.py +++ b/Framework/Built_In_Automation/Desktop/CrossPlatform/BuiltInFunctions.py @@ -11,6 +11,8 @@ # Modules # # # ######################### +from Framework.Built_In_Automation.Database.BuiltInFunctions import sr + try: import pyautogui as gui except: @@ -641,10 +643,13 @@ def check_for_element(data_set): # Parse data set try: + variable_name = None file_name = "" for row in data_set: if row[1] == "element parameter": file_name = row[2] + elif "action" in row[1]: + variable_name = row[2].strip() if file_name == "": CommonUtil.ExecLog( @@ -671,9 +676,13 @@ def check_for_element(data_set): if element in failed_tag_list: CommonUtil.ExecLog(sModuleInfo, "Element not found", 3) + x = 'False' + sr.Set_Shared_Variables(variable_name, x) return "zeuz_failed" else: CommonUtil.ExecLog(sModuleInfo, "Found element", 1) + x = 'True' + sr.Set_Shared_Variables(variable_name, x) return "passed" except Exception: diff --git a/Framework/Built_In_Automation/Desktop/Windows/BuiltInFunctions.py b/Framework/Built_In_Automation/Desktop/Windows/BuiltInFunctions.py index e85490ff3..816dda946 100644 --- a/Framework/Built_In_Automation/Desktop/Windows/BuiltInFunctions.py +++ b/Framework/Built_In_Automation/Desktop/Windows/BuiltInFunctions.py @@ -34,6 +34,7 @@ from PIL import Image, ImageGrab + python_folder = [] for location in subprocess.getoutput("where python").split("\n"): if "Microsoft" not in location: @@ -915,6 +916,17 @@ def image_search(step_data_set): confidence = 0.85 parent_dataset = [] image_text = "" + left_width = "" + top_height = "" + colour_state = "" + method_image = "" + language = "" + t_conf = 0.9 + text_screenshot = '' + easyocr_paragraph = '' + + + for left, mid, right in step_data_set: left = 
left.strip().lower() mid = mid.strip().lower() @@ -927,12 +939,44 @@ def image_search(step_data_set): confidence = float(right.replace("%", "").replace(" ", "").lower()) / 100 elif "text" in left: image_text = right + elif "left_width" in left: + left_width = float(right) + elif "top_height" in left: + top_height = float(right) + elif "colour" in left: + colour_state = right + elif 'language' in left: + language = right + elif 'method_image' in left: + method_image = right + elif 't_conf' in left: + t_conf = float(right) + elif 't_screenshot' in left: + text_screenshot = right + elif 'easyocr_paragraph' in left: + easyocr_paragraph = right + else: - file_name = right.strip() - if "~" in file_name: - file_name = str(Path(os.path.expanduser(file_name))) + if not image_text: + pass + else: + file_name = right.strip() + if "~" in file_name: + file_name = str(Path(os.path.expanduser(file_name))) if mid == "parent parameter": parent_dataset.append((left, "element parameter", right)) + if left_width == '': + left_width = 1 + if top_height == '': + top_height = 1 + if colour_state == '': + colour_state = 'black_white' + if method_image == '': + method_image = 'method_1' + if language == '': + language = 'en' + + if parent_dataset: parent = Get_Element(parent_dataset) @@ -963,50 +1007,132 @@ def image_search(step_data_set): # Find element information try: if image_text: - import cv2 - from pytesseract import pytesseract - pytesseract.tesseract_cmd = os.environ["PROGRAMFILES"] + r"\Tesseract-OCR\tesseract.exe" - - image_text = image_text.replace(" ", "").lower() - PIL.ImageGrab.grab().crop((left, top, left + width, top + height)).save("sample.jpg") - imge = cv2.imread("sample.jpg") - gray = cv2.cvtColor(imge, cv2.COLOR_BGR2GRAY) - - data = pytesseract.image_to_boxes(gray) - all_letters = data.split("\n") - print(all_letters) - full_string = "" - for i in all_letters: - full_string += i[0] if len(i) > 0 else "~" - full_string = full_string.lower() - - all_pos = [m.start() for m in 
re.finditer(image_text, full_string)] - - if -len(all_pos) <= idx < len(all_pos): - CommonUtil.ExecLog(sModuleInfo, "Found %s text elements. Returning element of index %s" % (len(all_pos), idx), 1) - i = all_pos[idx] - elif len(all_pos) != 0: - CommonUtil.ExecLog(sModuleInfo, "Found %s text elements. Index out of range" % len(all_pos), 3) - return "zeuz_failed" - else: - CommonUtil.ExecLog(sModuleInfo, 'Could not find text "%s"' % image_text, 3) - return "zeuz_failed" - msg = "" - a = all_letters[i:i + len(image_text)] - for i in a: - msg += i + "\n" - left_top = list(map(int, a[0].split(" ")[1:3])) - right_bottom = list(map(int, a[-1].split(" ")[3:5])) - center = left + (right_bottom[0] + left_top[0]) // 2, top + height - (right_bottom[1] + left_top[1]) // 2 - msg += "Center = " + str(center) + "\n" - # pyautogui.moveTo(center) + if method_image == 'method_1': + import cv2 + from pytesseract import pytesseract + pytesseract.tesseract_cmd = os.environ["PROGRAMFILES"] + r"\Tesseract-OCR\tesseract.exe" - element = left_top[0] + left, height - right_bottom[1] + top, right_bottom[0] - left_top[0], right_bottom[1] - left_top[1] - msg += "Coordinates = " + str(element) + "\n" - CommonUtil.ExecLog(sModuleInfo, msg, 5) + image_text = image_text.replace(" ", "").lower() + PIL.ImageGrab.grab().crop((left, top, left + width * left_width, top + height * top_height)).save("sample.jpg") + imge = cv2.imread("sample.jpg") + gray = cv2.cvtColor(imge, cv2.COLOR_BGR2GRAY) + + if colour_state == "black_white": + data = pytesseract.image_to_boxes(gray) + else: + data = pytesseract.image_to_boxes(imge) + all_letters = data.split("\n") + print(all_letters) + full_string = "" + for i in all_letters: + full_string += i[0] if len(i) > 0 else "~" + full_string = full_string.lower() + + all_pos = [m.start() for m in re.finditer(image_text, full_string)] + + if -len(all_pos) <= idx < len(all_pos): + CommonUtil.ExecLog(sModuleInfo, "Found %s text elements. 
Returning element of index %s" % (len(all_pos), idx), 1) + i = all_pos[idx] + elif len(all_pos) != 0: + CommonUtil.ExecLog(sModuleInfo, "Found %s text elements. Index out of range" % len(all_pos), 3) + return "zeuz_failed" + else: + CommonUtil.ExecLog(sModuleInfo, 'Could not find text "%s"' % image_text, 3) + return "zeuz_failed" + + msg = "" + a = all_letters[i:i + len(image_text)] + for i in a: + msg += i + "\n" + left_top = list(map(int, a[0].split(" ")[1:3])) + right_bottom = list(map(int, a[-1].split(" ")[3:5])) + center = left + (right_bottom[0] + left_top[0]) // 2, top + height - (right_bottom[1] + left_top[1]) // 2 + msg += "Center = " + str(center) + "\n" + # pyautogui.moveTo(center) + + element = left_top[0] + left, height - right_bottom[1] + top, right_bottom[0] - left_top[0], right_bottom[1] - left_top[1] + msg += "Coordinates = " + str(element) + "\n" + CommonUtil.ExecLog(sModuleInfo, msg, 5) + + return _Element(element) + + else: + import easyocr + import numpy as np + import cv2 + from pytesseract import pytesseract + import matplotlib.pyplot as plt + from difflib import SequenceMatcher + + pytesseract.tesseract_cmd = os.environ["PROGRAMFILES"] + r"\Tesseract-OCR\tesseract.exe" + + PIL.ImageGrab.grab().save("sample.png") + reader = easyocr.Reader([language]) + if easyocr_paragraph == 'true': + output = reader.readtext("sample.png",paragraph=True) + else: + output = reader.readtext("sample.png", paragraph=False) + + item = [] + count = 0 + crop_counter = 0 + for text in output: + def seq(a, b): + c = SequenceMatcher(a=a, b=b).ratio() + print(c) + if c > 0.8: + return c + + else: + return .00004 + + rslt = seq(image_text, text[1]) + if rslt >= float(t_conf): + # if image_text in text[1]: + item.append([text]) + print(text) + CommonUtil.ExecLog(sModuleInfo, "Found %s text. 
Returning element of index %s" % (image_text, count), 1) + count = count + 1 + else: + print(text) + continue + + if item == []: + CommonUtil.ExecLog(sModuleInfo, 'Could not find text "%s"' % image_text, 3) + return "zeuz_failed" + cord = np.array(item[idx][0]) + cord1 = cord.tolist() + # + x_min, y_min = [min(cord_val) for cord_val in zip(*cord1[0])] + x_max, y_max = [max(cord_val) for cord_val in zip(*cord1[0])] + + if text_screenshot !='': + img = cv2.imread("sample.png") + cropping = img[y_min:y_max , x_min:x_max] + cv2.imwrite('cropped_image.png', cropping) + from PIL import Image + + image = Image.open('cropped_image.png') + gray_image = image.convert('L') + output_2 = pytesseract.image_to_string(gray_image) + print(output_2) + + if output_2 in image_text or image_text in output_2: + element = x_min, y_min, x_max - x_min, y_max - y_min + crop_counter = 1 + return _Element(element) + else: + CommonUtil.ExecLog(sModuleInfo, 'Could not find text "%s"' % image_text, 3) + return "zeuz_failed" + else: + pass + if crop_counter == 0: + element = x_min,y_min,x_max-x_min,y_max-y_min + return _Element(element) + else: + pass - return _Element(element) else: # Scale image if required regex = re.compile(r"(\d+)\s*x\s*(\d+)", re.IGNORECASE) # Create regex object with expression @@ -1042,6 +1168,118 @@ def image_search(step_data_set): except: return CommonUtil.Exception_Handler(sys.exc_info()) +@logger +def new_image_text(step_data_set): + sModuleInfo = inspect.currentframe().f_code.co_name + " : " + MODULE_NAME + + import easyocr + import numpy as np + import cv2 + from pytesseract import pytesseract + import matplotlib.pyplot as plt + from difflib import SequenceMatcher + + pytesseract.tesseract_cmd = os.environ["PROGRAMFILES"] + r"\Tesseract-OCR\tesseract.exe" + + + file_name = "" + resolution = "" + idx = 0 + confidence = 0.85 + parent_dataset = [] + image_text = "" + left_width = "" + top_height = "" + colour_state = "" + method_image = "" + language = "" + t_conf = 
0.9 + text_screenshot = '' + easyocr_paragraph = '' + + + + for left, mid, right in step_data_set: + left = left.strip().lower() + mid = mid.strip().lower() + if mid == "element parameter": + if "index" in left: + idx = int(right.strip()) + elif "ntext" in left: + image_text = right + elif 'language' in left: + language = right + elif 't_conf' in left: + t_conf = float(right) + elif 't_screenshot' in left: + text_screenshot = right + elif 'easyocr_paragraph' in left: + easyocr_paragraph = right + + PIL.ImageGrab.grab().save("sample.png") + reader = easyocr.Reader([language]) + if easyocr_paragraph == 'true': + output = reader.readtext("sample.png", paragraph=True) + else: + output = reader.readtext("sample.png", paragraph=False) + + item = [] + count = 0 + crop_counter = 0 + for text in output: + def seq(a, b): + c = SequenceMatcher(a=a, b=b).ratio() + print(c) + if c > 0.8: + return c + + else: + return .00004 + + rslt = seq(image_text, text[1]) + if rslt >= float(t_conf): + # if image_text in text[1]: + item.append([text]) + print(text) + CommonUtil.ExecLog(sModuleInfo, "Found %s text. 
Returning element of index %s" % (image_text, count), 1) + count = count + 1 + else: + print(text) + continue + + if item == []: + CommonUtil.ExecLog(sModuleInfo, 'Could not find text "%s"' % image_text, 3) + return "zeuz_failed" + cord = np.array(item[idx][0]) + cord1 = cord.tolist() + # + x_min, y_min = [min(cord_val) for cord_val in zip(*cord1[0])] + x_max, y_max = [max(cord_val) for cord_val in zip(*cord1[0])] + + if text_screenshot != '': + img = cv2.imread("sample.png") + cropping = img[y_min:y_max, x_min:x_max] + cv2.imwrite('cropped_image.png', cropping) + from PIL import Image + + image = Image.open('cropped_image.png') + gray_image = image.convert('L') + output_2 = pytesseract.image_to_string(gray_image) + print(output_2) + + if output_2 in image_text or image_text in output_2: + element = x_min, y_min, x_max - x_min, y_max - y_min + crop_counter = 1 + return _Element(element) + else: + CommonUtil.ExecLog(sModuleInfo, 'Could not find text "%s"' % image_text, 3) + return "zeuz_failed" + else: + element = x_min, y_min, x_max - x_min, y_max - y_min + return _Element(element) + + + def _scale_image(file_name, size_w, size_h): """ This function calculates ratio and scales an image for comparison by _pyautogui() """ @@ -1108,6 +1346,11 @@ def Get_Element(data_set, wait_time=Shared_Resources.Get_Shared_Variables("eleme elif "path" in left: element_path = right.strip() elif "image" in left: element_image.append((left, mid, right)) + elif "imagetext" in left: + element_image.append((left, mid, right)) + elif "ntext" in left: + element_image.append((left, mid, right)) + elif mid == "parent parameter": parent = True @@ -1154,6 +1397,40 @@ def Get_Element(data_set, wait_time=Shared_Resources.Get_Shared_Variables("eleme s = time.time() while True: if element_image: + left_width = 1 + top_height = 1 + colour_state = 'black_white' + method_image = 'method_1' + t_conf = 0.9 + language = 'en' + text_screenshot = '' + easyocr_paragraph = '' + + for left, mid, right in 
data_set: + left = left.strip().lower() + mid = mid.strip().lower() + if mid == "element parameter": + if "left_width" in left: + left_width = right + elif "top_height" in left: + top_height = right + elif "colour" in left: + colour_state = right + elif 'language' in left: + language = right + elif 'method_image' in left: + method_image = right + elif 't_conf' in left: + t_conf = right + elif 't_screenshot' in left: + text_screenshot = right + elif 'easyocr_paragraph' in left: + easyocr_paragraph = right + + + + + _get_main_window(window_name) for i in (("name", parent_name), ("class", parent_class), ("automationid", parent_automation), ("control", parent_control), ("path", parent_path), ("window", window_name), ("index", parent_index)): if i[1]: @@ -1163,8 +1440,22 @@ def Get_Element(data_set, wait_time=Shared_Resources.Get_Shared_Variables("eleme data = (i[0], "parent parameter", i[1]) element_image.append(data) element_image.append(("index", "element parameter", str(element_index))) - result = image_search(element_image) - return result + element_image.append(("left_width", "element parameter", str(left_width))) + element_image.append(("top_height", "element parameter", str(top_height))) + element_image.append(("colour_state", "element parameter", str(colour_state))) + element_image.append(("language", "element parameter", str(language))) + element_image.append(("method_image", "element parameter", str(method_image))) + element_image.append(("t_conf", "element parameter", str(t_conf))) + element_image.append(("t_screenshot", "element parameter", str(text_screenshot))) + element_image.append(("easyocr_paragraph", "element parameter", str(easyocr_paragraph))) + + + if 'ntext' in element_image[0][0]: + result = new_image_text(element_image) + return result + else: + result = image_search(element_image) + return result if element_path: all_elements = Element_path_search(window_name, element_path) if all_elements == "zeuz_failed" and time.time() < s + wait_time: @@ 
-2095,3 +2386,5 @@ def save_attribute_values_in_list(data_set): return Shared_Resources.Set_Shared_Variables(variable_name, []) except Exception: return CommonUtil.Exception_Handler(sys.exc_info()) + +