<>Disclaimer: this is for learning only. Do not use it for anything illegal; you act at your own risk.
Even though the pandemic keeps flaring up again and again, plenty of readers have still been messaging me asking whether there is a way to grab courses automatically, so here is this tutorial. But let me say the obvious up front: don't do anything illegal!
<>Tools
This uses Python 3.6; install the following libraries (a quick import check follows the list):
* Install win32api
- pip install pywin32
* Install PIL
- pip install Pillow
* Install pyautogui
- pip install pyautogui
* Install numpy
- pip install numpy
* Install cv2
- pip install opencv-python
* Install matplotlib
- pip install matplotlib
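The post doesn't pin exact versions, so here is a quick sanity check that everything installed under the import names used later. The check itself is my addition, not part of the original script:

import win32api, win32con, win32gui   # pywin32
from PIL import Image                  # Pillow
import pyautogui
import numpy as np
import cv2                             # opencv-python
import matplotlib

print('numpy', np.__version__, '| opencv', cv2.__version__)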
Use Spy++ to look up the target window's title and class name.
The title is unique, so we can use it to find the window.
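If you don't have Spy++ at hand, a small pywin32 sketch (my own helper, not from the original script) can dump the visible top-level window titles and class names so you can confirm the exact title string:

import win32gui

def list_window_titles():
    # Print handle, title and class name of every visible top-level window
    def handler(hwnd, _):
        if win32gui.IsWindowVisible(hwnd):
            title = win32gui.GetWindowText(hwnd)
            if title:
                print('%x  %s  (%s)' % (hwnd, title, win32gui.GetClassName(hwnd)))
        return True
    win32gui.EnumWindows(handler, None)

list_window_titles()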
<>Getting the window handle
import win32api
import win32con
import win32gui

window_title = '课件学习 - Google Chrome'

# Screen resolution
screen_width = win32api.GetSystemMetrics(0)
screen_height = win32api.GetSystemMetrics(1)

# Find the Chrome window by its (unique) title
hwnd = win32gui.FindWindow(win32con.NULL, window_title)
if hwnd == 0:
    print('%s not found' % window_title)
    exit()
else:
    print('hwnd = %x' % hwnd)

# Window rectangle in screen coordinates
window_left, window_top, window_right, window_bottom = win32gui.GetWindowRect(hwnd)
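The snapshot module used in the main loop below isn't shown in the post. Here is a minimal sketch of what its grab_screen helper could look like, assuming it simply wraps Pillow's ImageGrab with a bounding box:

# Hypothetical snapshot.py - an assumed implementation of the grab_screen helper
from PIL import ImageGrab

def grab_screen(left, top, right, bottom):
    # Grab the given screen region and return it as a PIL Image
    return ImageGrab.grab(bbox=(int(left), int(top), int(right), int(bottom)))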
<>Main loop
How it works: the script keeps taking screenshots of the screen and compares them against template images. When the scene we are waiting for shows up, it grabs the matching coordinates and triggers an automatic click, which is what automates the whole flow. Two OpenCV-based algorithms do the heavy lifting here: an image similarity scoring algorithm and an image search (template matching) algorithm.
import time, cv2
import snapshot, match  # author's helpers: grab_screen, lookup_pos, score_pic

window_width = window_right - window_left
window_height = window_bottom - window_top

# Start with the whole window; the region of interest is narrowed on every pass
deal_left, deal_top = window_left, window_top
deal_right, deal_bottom = window_right, window_bottom

while True:
    grab_image = snapshot.grab_screen(deal_left, deal_top, deal_right, deal_bottom)
    #grab_image.show()
    grab_image.save(r'.\tmp_output\full_screen.png')

    # big pic size = 1936x1056
    full_screen_w = 1936
    full_screen_h = 1056
    pixel_core_x = 877.0
    pixel_core_y = 25.0

    # Region of interest, scaled from the reference 1936x1056 layout to the actual window
    deal_left = window_left  #window_left + kejian_x / full_screen_w * window_width - 100
    deal_top = window_top + pixel_core_y / full_screen_h * window_height - 20
    deal_right = window_left + window_width  #window_left + kejian_x / full_screen_w * window_width + 150
    deal_bottom = window_top + pixel_core_y / full_screen_h * window_height + 20

    grab_image = snapshot.grab_screen(deal_left, deal_top, deal_right, deal_bottom)
    search_pic = r'.\tmp_output\search_kejianxuexi.png'
    grab_image.save(search_pic)

    # find kejian_tem
    template_pic = r'.\template\kejian_tem.png'
    num, w, h, pos_list = match.lookup_pos(template_pic, search_pic)

    left = 0
    top = 0
    find_kejian_flag = 0
    no_voice_flag = 0
    if num == 1:
        left = pos_list[0][0]
        top = pos_list[0][1]
        find_kejian_flag = 1
    else:
        print('==========warning search_kejianxuexi = ' + str(num))
        find_kejian_flag = 0

    if find_kejian_flag:
        # Crop the matched area and score it against the voice / no-voice templates
        img_rgb = cv2.imread(search_pic)
        img_rgb = img_rgb[top:top + h, left:left + w + 80, :]  # h, w, c
        compare_pic = r'.\tmp_output\kejianxuexi_compare.png'
        cv2.imwrite(compare_pic, img_rgb)
        temp_voice = r'.\template\kejianhua_tem_voice.png'
        temp_no_voice = r'.\template\kejianhua_tem_no_voice.png'
        no_voice_flag = match.score_pic(compare_pic, temp_voice, temp_no_voice)

    if no_voice_flag:
        # The no-voice template matched better; the helpers below are click
        # functions that are not shown in the post.
        print('===============find no_voice_flag')
        find_question_flag = find_question()
        if find_question_flag:
            #second
            time.sleep(5)
            find_daan()
            time.sleep(5)
            find_quding()
        find_chongbo_flag = find_chong_bo()
        if find_question_flag and find_chongbo_flag:
            print('========>find_chongbo_flag and find_chongbo_flag')
            exit()
        if find_chongbo_flag:
            weikaishi()
    else:
        print('===============every thing is ok')

    time.sleep(2)
    #exit(0)
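The helpers called above (find_question, find_daan, find_quding, find_chong_bo, weikaishi) are not shown in the post. As a rough idea of what they might look like, here is a hedged sketch of a generic "find a template and click it" helper built on pyautogui; the function name, arguments and offsets are my own, not the original code:

import time
import pyautogui
import match  # lookup_pos from the image search section below

def find_and_click(template_pic, search_pic, offset_x, offset_y):
    # Hypothetical helper: locate template_pic inside the screenshot search_pic
    # and click the centre of the first match. offset_x/offset_y convert the
    # screenshot's local coordinates back into absolute screen coordinates.
    num, w, h, pos_list = match.lookup_pos(template_pic, search_pic)
    if num < 1:
        return 0
    left, top = pos_list[0]
    pyautogui.click(offset_x + left + w / 2, offset_y + top + h / 2)
    time.sleep(1)
    return 1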
<>Image similarity scoring algorithm
So how do you decide whether an image that has been through Photoshop is essentially the same as another image? A simple, easy-to-use solution is the perceptual hash algorithm (Perceptual Hash Algorithm). "Perceptual hash" is really a family of algorithms that includes aHash, pHash and dHash. As the name suggests, a perceptual hash is not computed in a strict, exact way; it is computed in a relative way, because "similar or not" is itself a relative judgment.
aHash: average hash. Fairly fast, but often not very accurate.
pHash: perceptual hash. More accurate, but slower.
dHash: difference hash. Amazing! High accuracy and also very fast, so I picked dHash as my image de-duplication algorithm.

import cv2
import numpy as np

def pHash(imgfile):
    """get image pHash value"""
    # Load as grayscale and resize to 64x64
    img = cv2.imread(imgfile, 0)
    img = cv2.resize(img, (64, 64), interpolation=cv2.INTER_CUBIC)
    # Build a float32 matrix and fill it with the pixel data
    h, w = img.shape[:2]
    vis0 = np.zeros((h, w), np.float32)
    vis0[:h, :w] = img
    # 2-D DCT transform
    vis1 = cv2.dct(cv2.dct(vis0))
    #cv.SaveImage('a.jpg',cv.fromarray(vis0))  # save the image
    vis1.resize(32, 32)  # keep 32*32 coefficients
    # Flatten the 2-D array into a 1-D list
    img_list = vis1.flatten().tolist()
    # Mean value
    avg = sum(img_list) / (len(img_list) * 1.0)
    avg_list = ['0' if i < avg else '1' for i in img_list]
    # Build the hash string
    return ''.join(['%x' % int(''.join(avg_list[x:x + 4]), 2) for x in range(0, 32 * 32, 4)])

def hammingDist(s1, s2):
    assert len(s1) == len(s2)
    return sum([ch1 != ch2 for ch1, ch2 in zip(s1, s2)])

def aHash(img):
    # Resize to 8*8
    img = cv2.resize(img, (8, 8), interpolation=cv2.INTER_CUBIC)
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # s is the running pixel sum, hash_str the hash value
    s = 0
    hash_str = ''
    # Sum all pixels
    for i in range(8):
        for j in range(8):
            s = s + gray[i, j]
    # Average gray level
    avg = s / 64
    # Pixels above the average become 1, otherwise 0
    for i in range(8):
        for j in range(8):
            if gray[i, j] > avg:
                hash_str = hash_str + '1'
            else:
                hash_str = hash_str + '0'
    return hash_str

def cmpHash(hash1, hash2):
    n = 0
    # Different hash lengths mean bad arguments; return -1
    if len(hash1) != len(hash2):
        return -1
    # Count differing bits and turn the count into a similarity score
    for i in range(len(hash1)):
        if hash1[i] != hash2[i]:
            n = n + 1
    return 1 - n / 64

def score_pic(compare_pic, temp_voice, temp_no_voice):
    #HASH1 = pHash(compare_pic)
    #HASH2 = pHash(temp_voice)
    #out_score = 1 - hammingDist(HASH1, HASH2) * 1. / (32 * 32 / 4)
    img1 = cv2.imread(compare_pic)
    img2 = cv2.imread(temp_voice)
    img3 = cv2.imread(temp_no_voice)
    #time1 = time.time()
    hash1 = aHash(img1)
    hash2 = aHash(img2)
    voice_score = cmpHash(hash1, hash2)
    hash1 = aHash(img1)
    hash3 = aHash(img3)
    no_voice_score = cmpHash(hash1, hash3)
    no_voice_flag = 0
    #print(str(voice_score) + '=>' + str(no_voice_score))
    if no_voice_score >= voice_score:
        no_voice_flag = 1
    else:
        no_voice_flag = 0
    return no_voice_flag
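Only pHash and aHash appear in the code above, while the text talks about dHash. For completeness, here is a minimal dHash sketch in the same style (my own addition, not the original script); its output can be compared with cmpHash just like the aHash string:

def dHash(img):
    # Difference hash: resize to 9x8 so every row gives 8 left/right comparisons
    img = cv2.resize(img, (9, 8), interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    hash_str = ''
    for i in range(8):
        for j in range(8):
            # Each bit says whether a pixel is brighter than its right neighbour
            hash_str += '1' if gray[i, j] > gray[i, j + 1] else '0'
    return hash_str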
<>Image search algorithm
Template matching is done with res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED).
def lookup_pos(template_pic, search_pic):
    img_rgb = cv2.imread(search_pic)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    img = img_gray
    #print(img.shape)
    template = cv2.imread(template_pic, 0)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.95
    loc = np.where(res >= threshold)
    num = 0
    left = 0
    top = 0
    pos_list = []
    for pt in zip(*loc[::-1]):
        # Draw a rectangle around every match above the threshold
        cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        left = pt[0]
        top = pt[1]
        pos_list.append(pt)
        num = num + 1
    res = res * 256
    cv2.imwrite(r'.\tmp_output\out.png', img_rgb)
    cv2.imwrite(r'.\tmp_output\res.png', res)
    return num, w, h, pos_list
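For reference, a standalone call might look like this (the template/screenshot pair is the one the main loop already writes to disk):

num, w, h, pos_list = lookup_pos(r'.\template\kejian_tem.png',
                                 r'.\tmp_output\search_kejianxuexi.png')
print('found %d match(es), template size %dx%d' % (num, w, h))
for left, top in pos_list:
    print('match at x=%d, y=%d' % (left, top))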
<>Taking classes is actually a wonderful thing,
<>you just don't realize it yet…
<>That's all for today's article~
<>I'm 小熊猫; see you in the next article (✿◡‿◡)