tesseract識別OPENCV PIL之間轉換例子
来源:互联网 发布:数据库一体机价格 编辑:程序博客网 时间:2024/06/03 15:51
"""Digital-display OCR pipeline (Tesseract + OpenCV + PIL).

Pipeline: webcam capture -> resize/edge detection -> 4-point display-contour
search -> perspective normalization -> contrast rescale + threshold ->
Tesseract OCR with the 'letsgodigital' seven-segment model.
"""
import numpy as np
import cv2
import imutils
from skimage import exposure
from pytesseract import image_to_string
import PIL


def take_picture(should_save=False, d_id=0):
    """Grab a single frame from camera ``d_id``.

    Optionally saves the frame to 'ocr.jpg'. Returns the BGR image array,
    or None (implicitly) when the capture fails.
    """
    cam = cv2.VideoCapture(d_id)
    s, img = cam.read()
    cam.release()  # FIX: original never released the capture device
    if s:
        if should_save:
            cv2.imwrite('ocr.jpg', img)
        print("picture taken")  # print() form works on Python 2 and 3
        return img


def cnvt_edged_image(img_arr, should_save=False):
    """Return a Canny edge map of ``img_arr`` resized to height 300."""
    image = imutils.resize(img_arr, height=300)
    gray_image = cv2.bilateralFilter(
        cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 11, 17, 17)
    edged_image = cv2.Canny(gray_image, 30, 200)
    if should_save:
        # FIX: original called cv2.imwrite('cntr_ocr.jpg') with no image
        # argument, which raises a TypeError at runtime.
        cv2.imwrite('cntr_ocr.jpg', edged_image)
    return edged_image


def find_display_contour(edge_img_arr):
    """Return the first 4-point contour among the 10 largest, or None.

    Input must already be an edge image from cnvt_edged_image.
    """
    display_contour = None
    edge_copy = edge_img_arr.copy()  # findContours may modify its input
    # NOTE(review): this 2-value unpacking matches OpenCV 2.x/4.x;
    # OpenCV 3.x returns (image, contours, hierarchy) — confirm the
    # installed version.
    contours, hierarchy = cv2.findContours(
        edge_copy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    top_cntrs = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    for cntr in top_cntrs:
        peri = cv2.arcLength(cntr, True)
        approx = cv2.approxPolyDP(cntr, 0.02 * peri, True)
        if len(approx) == 4:  # a quadrilateral — assume it is the display
            display_contour = approx
            break
    return display_contour


def crop_display(image_arr):
    """Return the display's 4 corner points as a (4, 2) array."""
    edge_image = cnvt_edged_image(image_arr)
    display_contour = find_display_contour(edge_image)
    # NOTE(review): raises AttributeError if no 4-point contour was found
    # (display_contour is None) — callers get no graceful failure.
    return display_contour.reshape(4, 2)


def normalize_contrs(img, cntr_pts):
    """Perspective-warp ``img`` so the quad ``cntr_pts`` becomes a rectangle.

    ``cntr_pts`` come from a copy resized to height 300, so they are scaled
    back up by ``img.shape[0] / 300`` before computing the transform.
    """
    ratio = img.shape[0] / 300.0
    norm_pts = np.zeros((4, 2), dtype="float32")
    # Order the corners: smallest coord-sum is top-left, largest bottom-right;
    # smallest y-x difference is top-right, largest bottom-left.
    s = cntr_pts.sum(axis=1)
    norm_pts[0] = cntr_pts[np.argmin(s)]
    norm_pts[2] = cntr_pts[np.argmax(s)]
    d = np.diff(cntr_pts, axis=1)
    norm_pts[1] = cntr_pts[np.argmin(d)]
    norm_pts[3] = cntr_pts[np.argmax(d)]
    norm_pts *= ratio
    (top_left, top_right, bottom_right, bottom_left) = norm_pts
    # Output size: the longer of the two opposite edges in each direction.
    width1 = np.sqrt(((bottom_right[0] - bottom_left[0]) ** 2) +
                     ((bottom_right[1] - bottom_left[1]) ** 2))
    width2 = np.sqrt(((top_right[0] - top_left[0]) ** 2) +
                     ((top_right[1] - top_left[1]) ** 2))
    height1 = np.sqrt(((top_right[0] - bottom_right[0]) ** 2) +
                      ((top_right[1] - bottom_right[1]) ** 2))
    height2 = np.sqrt(((top_left[0] - bottom_left[0]) ** 2) +
                      ((top_left[1] - bottom_left[1]) ** 2))
    max_width = max(int(width1), int(width2))
    max_height = max(int(height1), int(height2))
    dst = np.array([[0, 0],
                    [max_width - 1, 0],
                    [max_width - 1, max_height - 1],
                    [0, max_height - 1]], dtype="float32")
    persp_matrix = cv2.getPerspectiveTransform(norm_pts, dst)
    return cv2.warpPerspective(img, persp_matrix, (max_width, max_height))


def process_image(orig_image_arr):
    """Segment the display and return a binary (0/255) threshold array."""
    # (removed unused local `ratio` — normalize_contrs computes its own)
    display_image_arr = normalize_contrs(
        orig_image_arr, crop_display(orig_image_arr))
    gry_disp_arr = cv2.cvtColor(display_image_arr, cv2.COLOR_BGR2GRAY)
    gry_disp_arr = exposure.rescale_intensity(gry_disp_arr, out_range=(0, 255))
    ret, thresh = cv2.threshold(gry_disp_arr, 127, 255, cv2.THRESH_BINARY)
    return thresh


def ocr_image(orig_image_arr):
    """OCR the segmented display; returns the recognized digit string."""
    otsu_thresh_image = PIL.Image.fromarray(process_image(orig_image_arr))
    # NOTE(review): '-psm 100' is outside Tesseract's documented psm range
    # (0-13) — confirm intended mode; whitelist restricts output to digits
    # and the decimal point.
    return image_to_string(
        otsu_thresh_image, lang="letsgodigital",
        config="-psm 100 -c tessedit_char_whitelist=.0123456789")
"""Digital-display OCR pipeline (Tesseract + OpenCV + PIL) — duplicate copy.

Capture a frame, find the seven-segment display via contour analysis,
flatten it with a perspective warp, threshold it, and OCR the digits.
"""
# Source: https://github.com/upupnaway/digital-display-character-rec/blob/master/digital_display_ocr.py
import numpy as np
import cv2
import imutils
from skimage import exposure
from pytesseract import image_to_string
import PIL


def take_picture(should_save=False, d_id=0):
    """Capture one frame from camera ``d_id``; save to 'ocr.jpg' if asked.

    Returns the BGR frame, or None (implicitly) when the read fails.
    """
    cam = cv2.VideoCapture(d_id)
    s, img = cam.read()
    cam.release()  # FIX: original leaked the capture handle
    if s:
        if should_save:
            cv2.imwrite('ocr.jpg', img)
        print("picture taken")  # print() form is Python 2/3 compatible
        return img


def cnvt_edged_image(img_arr, should_save=False):
    """Resize to height 300, denoise, and return the Canny edge map."""
    image = imutils.resize(img_arr, height=300)
    gray_image = cv2.bilateralFilter(
        cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 11, 17, 17)
    edged_image = cv2.Canny(gray_image, 30, 200)
    if should_save:
        # FIX: original omitted the image argument to cv2.imwrite
        cv2.imwrite('cntr_ocr.jpg', edged_image)
    return edged_image


def find_display_contour(edge_img_arr):
    """Return the first quadrilateral among the 10 largest contours, or None.

    The input must have been produced by cnvt_edged_image.
    """
    display_contour = None
    edge_copy = edge_img_arr.copy()  # protect caller's array from mutation
    # NOTE(review): 2-value unpacking assumes OpenCV 2.x/4.x findContours;
    # OpenCV 3.x returns three values.
    contours, hierarchy = cv2.findContours(
        edge_copy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    top_cntrs = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    for cntr in top_cntrs:
        peri = cv2.arcLength(cntr, True)
        approx = cv2.approxPolyDP(cntr, 0.02 * peri, True)
        if len(approx) == 4:
            display_contour = approx
            break
    return display_contour


def crop_display(image_arr):
    """Return the display corner points as a (4, 2) array."""
    edge_image = cnvt_edged_image(image_arr)
    display_contour = find_display_contour(edge_image)
    # NOTE(review): if no 4-point contour exists this raises AttributeError
    # on None — consider handling upstream.
    return display_contour.reshape(4, 2)


def normalize_contrs(img, cntr_pts):
    """Warp ``img`` so the display quad ``cntr_pts`` becomes a flat rectangle.

    Corner points were detected on a height-300 copy and are scaled back
    to full resolution via ``img.shape[0] / 300``.
    """
    ratio = img.shape[0] / 300.0
    norm_pts = np.zeros((4, 2), dtype="float32")
    # Canonical corner ordering: TL (min x+y), BR (max x+y),
    # TR (min y-x), BL (max y-x).
    s = cntr_pts.sum(axis=1)
    norm_pts[0] = cntr_pts[np.argmin(s)]
    norm_pts[2] = cntr_pts[np.argmax(s)]
    d = np.diff(cntr_pts, axis=1)
    norm_pts[1] = cntr_pts[np.argmin(d)]
    norm_pts[3] = cntr_pts[np.argmax(d)]
    norm_pts *= ratio
    (top_left, top_right, bottom_right, bottom_left) = norm_pts
    # Target rectangle dimensions: longest opposing edge in each axis.
    width1 = np.sqrt(((bottom_right[0] - bottom_left[0]) ** 2) +
                     ((bottom_right[1] - bottom_left[1]) ** 2))
    width2 = np.sqrt(((top_right[0] - top_left[0]) ** 2) +
                     ((top_right[1] - top_left[1]) ** 2))
    height1 = np.sqrt(((top_right[0] - bottom_right[0]) ** 2) +
                      ((top_right[1] - bottom_right[1]) ** 2))
    height2 = np.sqrt(((top_left[0] - bottom_left[0]) ** 2) +
                      ((top_left[1] - bottom_left[1]) ** 2))
    max_width = max(int(width1), int(width2))
    max_height = max(int(height1), int(height2))
    dst = np.array([[0, 0],
                    [max_width - 1, 0],
                    [max_width - 1, max_height - 1],
                    [0, max_height - 1]], dtype="float32")
    persp_matrix = cv2.getPerspectiveTransform(norm_pts, dst)
    return cv2.warpPerspective(img, persp_matrix, (max_width, max_height))


def process_image(orig_image_arr):
    """Segment the display and return the binary-thresholded array."""
    # (unused local `ratio` removed; normalize_contrs derives its own)
    display_image_arr = normalize_contrs(
        orig_image_arr, crop_display(orig_image_arr))
    gry_disp_arr = cv2.cvtColor(display_image_arr, cv2.COLOR_BGR2GRAY)
    gry_disp_arr = exposure.rescale_intensity(gry_disp_arr, out_range=(0, 255))
    ret, thresh = cv2.threshold(gry_disp_arr, 127, 255, cv2.THRESH_BINARY)
    return thresh


def ocr_image(orig_image_arr):
    """Run Tesseract on the segmented display; returns the digit string."""
    otsu_thresh_image = PIL.Image.fromarray(process_image(orig_image_arr))
    # NOTE(review): '-psm 100' exceeds Tesseract's documented psm range
    # (0-13) — confirm the intended page-segmentation mode.
    return image_to_string(
        otsu_thresh_image, lang="letsgodigital",
        config="-psm 100 -c tessedit_char_whitelist=.0123456789")
0 0
- tesseract識別OPENCV PIL之間轉換例子
- Tesseract + opencv
- c#利用tesseract例子
- tesseract 简单例子
- Leptonica --> tesseract --> OpenCV
- Tesseract/OpenCV on Android
- OpenCV + Tesseract on Android
- opencv与tesseract
- ubuntu 12.04安装PIL tesseract进行验证码识别
- python 使用tesseract-ocr , pytesseract , PIL进行验证码识别
- python基于PIL和tesseract的验证码识别
- python--PIL操作像素例子
- Qt、tesseract、OpenCV 环境配置
- PIL opencv 学习网站记录
- 浅谈OCR之Tesseract
- 浅谈OCR之Tesseract
- Android OCR 之 tesseract
- 浅谈OCR之Tesseract
- 配置andaroid ndk笔记
- JAVA SE 学习第六天
- 【opencv练习32 - 查找轮廓】
- 数据库组合查询与联接
- 【工控机开发】debian下安装QT及Opencv
- tesseract識別OPENCV PIL之間轉換例子
- 使用R语言进行图像分类
- 2016aws0908 亚马逊云 回顾
- 读书有什么用——北漂18年(番外篇三)
- Java中的static关键字解析
- 【opencv练习33 - 寻找轮廓凸壳】
- JavaSE基础
- 正则表达式的详细讲解,转自大神
- 动态规划求解序列问题(LIS、JLIS)