What is this for?
It supports both actor pages and list pages. It crawls the entries into an Excel file, and you then filter that file yourself to decide what to download. The interface is shown below.


Since I don't have a membership, I can't test the FC2 entries that require one.
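The filtering itself is ordinary pandas work once the file exists. A minimal sketch of what that step might look like, assuming the column layout written by Creat_Excel() further down (video code as the index, '时长' in minutes, 'URL' holding the magnet link when one was found) and a placeholder file name 篠田.xlsx:

import pandas as pd

df = pd.read_excel('篠田.xlsx', index_col=0)                             # the 番号 column is the index
picked = df[(df['时长'] >= 90) & df['URL'].str.startswith('magnet:')]    # keep 90+ minute entries that have a magnet
picked['URL'].to_csv('magnets.txt', index=False, header=False)           # one magnet per line, paste into a client

Rows where no magnet was found keep the detail-page URL in that column, so they simply drop out of the filter.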
The code is as follows.
import os                                         # OS / file-system operations
try:                                              # try to import pandas
    import pandas as pd
except ImportError:                               # if that fails, install it with pip and retry
    os.system('pip install pandas')
    import pandas as pd
try:                                              # try to import requests
    import requests                               # library for fetching web data
except ImportError:
    os.system('pip install requests')
    import requests
try:                                              # try to import pywifi
    import pywifi                                 # library for scanning Wi-Fi
except ImportError:
    os.system('pip install comtypes')
    os.system('pip install pywifi')
    import pywifi
try:
    from bs4 import BeautifulSoup                 # HTML parsing
except ImportError:
    os.system('pip install beautifulsoup4')
    os.system('pip install lxml')
    from bs4 import BeautifulSoup
import platform
import time
import re
import tkinter.filedialog                         # file-open dialog
import threading                                  # worker threads, so the UI doesn't freeze
import urllib.parse                               # currently only used to pull the domain out of a URL
from tkinter import *
from tkinter import ttk                           # combobox
from tkinter.ttk import Progressbar               # progress bar
from tkinter.scrolledtext import ScrolledText     # text widget with a scrollbar
from requests.adapters import HTTPAdapter         # retry count
from ctypes import *
from sys import version                           # interpreter version string
Actor1 = 'https://测试链接.com/actors/WE4e'        # 篠田
ActorLink = Actor1
sort = {0: '发布时间', 1: '评分', 2: '热度', 3: '想看人数', 4: '看过人数'}
sort2 = {'发布时间': 0, '评分': 1, '热度': 2, '想看人数': 3, '看过人数': 4}
sortValue = ['发布时间', '评分', '热度', '想看人数', '看过人数']
versionStr = 'Javdb爬演员影片磁力 V1.05'
URL_DOMAIN = ''
VideoNumber = 0
ActorVideioInfoList = {}
ActorName = ''
LimitPageNum = 60                 # the site has a bug and only pages up to 60; without a cap the crawl would never stop
htmlget = requests.Session()      # global session: keeps the connection alive for faster requests
def print_Welcomeinfo():
    print(' ______________________________________________________________________________ ')
    print('| |')
    print('| 基于这个版本 |')
    print('| Python 3.8.5 (tags/v3.8.5:580fbb0, Jul 20 2020, 15:57:54) |')
    print('| [MSC v.1924 64 bit (AMD64)] on win32 |')
    print('| |')
    print('| 注1:请勿在公司使用,除非公司不审查流量 |')
    print('| 注2:Check_Wifi_SSID()函数是通过wifi名称判断是不是在公司 |')
    print('| 注3:输入演员的主页链接 |')
    print('| 注4:随手写的,不关注条理性、效率 |')
    print('|______________________________________________________________________________|\n\n')
    return 0
# Wi-Fi SSID check. Some companies audit outbound traffic very strictly, so this
# proactively checks whether we look like we are on a company network.
# (A wired connection can't be detected this way, so mind that yourself.)
def Check_Wifi_SSID():
    Wifi_Blacklist = ['fang', 'sf', 'staff']
    wifi = pywifi.PyWiFi()              # wireless interfaces
    ifaces = wifi.interfaces()[0]       # first wireless NIC
    profile = ifaces.scan_results()[0]  # first scanned network
    Devname = ifaces.name()             # NIC name
    ssid = profile.ssid.lower()         # SSID, lower-cased
    for Blacklist in Wifi_Blacklist:
        if Blacklist in ssid:
            return 'forbid'
    return 'allow'
# debug helper: print a list
def debug_print_FileList(lists, flag):
    print('============', flag, '============')
    for item in lists:
        print(item)
    return 0
# debug helper: save a fetched HTML page to disk
def SaveHtmlFile(HtmlPath, HtmlData):
    HtmlFile = open(HtmlPath, 'w', encoding='utf-8')
    HtmlFile.write(HtmlData)
    HtmlFile.close()
    return 0
# read the proxy settings from the settings controls
def Get_ProxySetting():
    ProxySetting = {}
    ProxyType = ProxyType_comb.get()
    ProxyIP = ProxyIP_Entry.get()
    ProxyPort = ProxyPort_Entry.get()
    ProxySetting['http'] = '%s://%s:%s' % (ProxyType, ProxyIP, ProxyPort)
    ProxySetting['https'] = '%s://%s:%s' % (ProxyType, ProxyIP, ProxyPort)
    return ProxySetting
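# Note: Get_ProxySetting() returns a requests-style proxies dict, e.g.
#   {'http': 'socks5://127.0.0.1:7890', 'https': 'socks5://127.0.0.1:7890'}
# The socks4/socks5 choices only work if PySocks is installed
# (pip install requests[socks]); http/https proxies need nothing extra.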
# request a web page and return its HTML, or None on failure
def GetHtml(URL):
    global htmlget
    html = None
    print('GetHtml(\'%s\')' % URL)
    ProxySettings = {}
    headers1 = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'}
    headers2 = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.82'}
    max_retry = int(RetryNum_label_Entry.get(), 10) + 1   # retry count from the UI
    timeout_s = int(timeout_label_Entry.get(), 10)        # timeout (seconds) from the UI
    # set the maximum number of retries on the shared requests session
    htmlget.mount('http://', HTTPAdapter(max_retries=max_retry - 1))    # max retries for http links
    htmlget.mount('https://', HTTPAdapter(max_retries=max_retry - 1))   # max retries for https links
    if ProxyEnable_val.get() == 1:
        ProxySettings = Get_ProxySetting()
    else:
        ProxySettings = None
    if Debug_val.get() == 1:
        print('ProxySettings=', ProxySettings)
    try:
        html = htmlget.get(URL, headers=headers2, timeout=timeout_s, proxies=ProxySettings).content.decode('utf-8')
    except Exception:
        html = None
    return html
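# Note: HTTPAdapter(max_retries=N) only retries failed DNS lookups, socket
# connections and connection timeouts; it never retries a request once data has
# reached the server, and it does not retry on HTTP error status codes.
# Re-mounting the adapter on every call is harmless; it just replaces the previous one.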
# called by the [代理测试] button: test the proxy against three sites that are unreachable without one
def ProxyTest():
    Disable_ProxySetting_All()
    URL_LIST = ['https://www.google.com.hk', 'https://www.facebook.com', 'https://twitter.com']
    Video_ParseRlt_textbox.delete(1.0, END)
    for URL in URL_LIST:
        html = GetHtml(URL)
        if html != None:
            print('代理连接正常', URL)
            Video_ParseRlt_textbox.insert(END, URL.split('://')[1] + ' 代理连接正常\n')
        else:
            print('代理连接失败', URL)
            Video_ParseRlt_textbox.insert(END, URL.split('://')[1] + ' 代理连接失败\n')
    Reset_ProxySetting_All()
    return 0
# pull the filter flags out of the "t=" URL parameter
def GetFilterType(string):
    FilterType = ['']
    if '?' in string:
        param = string.split('?', 1)[1]
        if 't=' in string:
            FilterType = string.split('=', 1)[1]
            FilterType = FilterType.split('&', 1)[0]
            FilterType = FilterType.split(',')
    return FilterType
# pull the sort type out of the "sort_type=" URL parameter
def GetSortType(string):
    SortType = 0
    if 'sort_type' in string:
        SortType = string.split('sort_type=', 1)[1]
        SortType = SortType.split('&', 1)[0]   # tolerate trailing parameters
        SortType = int(SortType, 10)
        if SortType > 4:
            SortType = 4
    return SortType
def print_UrlViewParam(SingleValue, MagnetValue, SortTypeValue):
    if SingleValue == 1:
        print('单体作品 ', end='')
    if MagnetValue == 1:
        print('有磁力 ', end='')
    print(sort[SortTypeValue], end='')
    return 0
def isActorLink(urllink):
    if 'actors' in urllink.lower():
        return 1
    return 0
# strip the query string off a URL
def GetUrlLink(Url):
    UrlLink = ''
    UrlLink = Url.split('?')[0]
    UrlLink = UrlLink.split('&')[0]
    if UrlLink == None:
        return ''
    else:
        return UrlLink
def SetElementStatus(Magnet, Single, SortType):
    HaveMagnet_val.set(Magnet)
    Single_val.set(Single)
    SortType_comb.current(SortType)
# parse the filter/sort parameters out of a URL
def GetUrlViewParam(string):
    param = None
    Single = 0
    Magnet = 0
    #asd = re.findall("[^?]+$", string)   # regular expressions, never really got the hang of them
    FilterType = GetFilterType(string)
    SortType = GetSortType(string)
    #print(FilterType)
    if 's' in FilterType:
        Single = 1
    if 'd' in FilterType:
        Magnet = 1
    #print('过滤规则(s=单体,d=有磁力):', FilterType)
    #print('排序规则(0=发布时间 1=评分 2=热度 3=想看的人数 4=看过的人数):', SortType)
    return Magnet, Single, SortType
# rebuild the URL from the filter/sort parameters
def SetUrlViewParam(Url, Magnet, Single, SortType):
    #print(Url, Magnet, Single, SortType)
    URL_fine = Url
    Magnet_v = {0: '', 1: 'd'}
    Single_v = {0: '', 1: 's'}
    param = []
    if Magnet == 1:
        param = param + [Magnet_v[Magnet]]
    if Single == 1:
        param = param + [Single_v[Single]]
    param = ','.join(param)
    #print(param)
    if Magnet == 1 or Single == 1:
        URL_fine = URL_fine + '?t=%s' % (param)
    #print(URL_fine)
    if Magnet == 1 or Single == 1 or SortType != 0:
        sep = '&' if '?' in URL_fine else '?'   # without a t= filter the query string has not been started yet
        URL_fine = URL_fine + sep + 'sort_type=%d' % (SortType)
    return URL_fine
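# Example of what SetUrlViewParam() produces:
#   SetUrlViewParam('https://.../actors/WE4e', Magnet=1, Single=1, SortType=1)
#     -> 'https://.../actors/WE4e?t=d,s&sort_type=1'
# i.e. t= carries the filter flags (d = has magnet, s = single work) and
# sort_type selects one of the orderings in the `sort` table above.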
# rebuild the URL in the entry box from the current checkbox / combobox state
def ResetUrlViewParam():
    Url = ActorURL_text.get()
    UrlLink = GetUrlLink(Url)
    Magnet = HaveMagnet_val.get()
    Single = Single_val.get()
    #print(SortType_comb.get())
    SortType = sort2[SortType_comb.get()]
    #print(SortType)
    Url = SetUrlViewParam(UrlLink, Magnet, Single, SortType)
    ActorURL_value.set(Url)
    #print(Url)
    return Url
# same as ResetUrlViewParam(), but usable as a Tk event callback
def ResetUrlViewParam_EventBind(event):
    return ResetUrlViewParam()
def init_ElementStatus_EventBind(event):
    url = ActorURL_text.get()
    magnet, single, sortType = GetUrlViewParam(url)
    SetElementStatus(magnet, single, sortType)
    return
# pull the magnet link out of the video-detail HTML
def Get_video_magnet(SoupData):
    magnet_link = ''
    magnet_node = SoupData.find('div', attrs={'class': 'item columns is-desktop odd'})
    if magnet_node == None:
        #magnet_node = SoupData.find(class_="item columns is-desktop ")
        magnet_node = SoupData.find('div', attrs={'class': 'item columns is-desktop'})
    if magnet_node != None:
        if 'href' in magnet_node.a.attrs:
            #print('磁力链接', magnet_node.a['href'])
            magnet_link = magnet_node.a['href']
        else:
            if Debug_val.get() == 1:
                print('\t\t无href属性')
            pass
    else:
        if Debug_val.get() == 1:
            print('\t\t无磁力链接')
        pass
    return magnet_link
# pull the actor list out of the video-detail HTML; NotFound is the default when nothing is found
def Get_video_actors(SoupData, NotFound):
    actors_list = []
    actors_nodes = SoupData.find_all(class_="symbol female")   # all markers for female actors
    if actors_nodes == None:
        return [NotFound]
    for actors_node in actors_nodes:
        actors_previous_node = actors_node.previous_sibling    # the preceding node
        actors = actors_previous_node                          # ...which is the <a href="..."> </a> holding the name
        actors_list.append(actors.get_text())
    return actors_list
# pull the video code out of the video-detail HTML; NotFound is the default when nothing is found
def Get_video_codes(SoupData, NotFound):
    video_codes = ''
    video_codes_copy_button = SoupData.find('a', attrs={'class': 'button is-white copy-to-clipboard'})
    if video_codes_copy_button == None:
        return NotFound
    video_codes = video_codes_copy_button.get('data-clipboard-text')
    return video_codes
# pull the score out of the video-detail HTML
def Get_video_score(SoupData):
    score = ''
    video_score_nodes = SoupData.find_all('div', attrs={'class': 'panel-block'})
    for video_score_node in video_score_nodes:
        try:
            if '評分' in video_score_node.strong.get_text():
                score = video_score_node.span.get_text().strip().replace(' ', '')  # drop all spaces
                if '由' in score:
                    score = score.replace('由', '')      # drop '由'
                if '評價' in score:
                    score = score.replace('評價', '')    # drop '評價'
                break
        except:
            pass
    return score
# pull the running time (minutes) out of the video-detail HTML
def Get_video_minute(SoupData):
    video_minute = 0
    video_minute_nodes = SoupData.find_all('div', attrs={'class': 'panel-block'})
    for video_minute_node in video_minute_nodes:
        try:
            if '時長' in video_minute_node.strong.get_text():
                str_minute = video_minute_node.span.get_text()
                if 'N/A' in str_minute:
                    return 0
                minute = str_minute.split(' ')[0].strip()
                video_minute = int(minute, 10)
                break
        except:
            pass
    return video_minute
# pull the title out of the video-detail HTML
def Get_video_title(SoupData):
    video_title = ''
    title_node = SoupData.find('strong', attrs={'class': 'current-title'})
    if title_node == None:
        return ''
    video_title = title_node.get_text()
    return video_title
'''
remote mode: Get_Video_detail('remote', 'https://测试.com/v/55aEz')
local HTML:  Get_Video_detail('local', 'CAWD-386.html')
'''
# pull the video details out of the HTML: code, actor list, score, running time, magnet, title
# the code itself is not strictly needed (the list page already has it); it is parsed here for
# debugging, because it is used as the file name when the fetched HTML is saved to disk
def Get_Video_detail(RemoteMode, URL):
    htmlData = None
    vedio_code = ''
    vedio_actors = []
    vedio_score = ''
    vedio_minute = 0
    vedio_magnet = ''
    vedio_title = ''
    soup = None
    if RemoteMode == 'remote':    # remote mode
        htmlData = GetHtml(URL)
        if htmlData != None:
            soup = BeautifulSoup(htmlData, features="lxml")
    else:                         # local HTML file
        soup = BeautifulSoup(open(URL, encoding='utf-8'), features="lxml")
    if soup != None:
        vedio_code = Get_video_codes(soup, '异常')
        vedio_actors = Get_video_actors(soup, '异常')
        vedio_score = Get_video_score(soup)
        vedio_minute = Get_video_minute(soup)
        vedio_magnet = Get_video_magnet(soup)
        vedio_title = Get_video_title(soup)
    if Debug_val.get() == 11111:   # effectively disabled: dump the fetched HTML to disk
        if RemoteMode == 'remote':
            if htmlData != None:
                print('写\'%s.html\'' % vedio_code)
                SaveHtmlFile('%s.html' % vedio_code, htmlData)
    '''
    if Debug_val.get() == 1:
        print("番号", vedio_code)
        print("演员", vedio_actors)
        print("磁力", vedio_magnet)
    '''
    return vedio_code, vedio_actors, vedio_score, vedio_minute, vedio_magnet, vedio_title
# proxy master switch: toggle the proxy-type combobox, IP box, port box and test button
def Init_ProxySetting_group():
    if ProxyEnable_val.get() == 1:                  # proxy enabled
        ProxyType_comb.config(state='readonly')     # proxy-type combobox: selectable, text read-only
        ProxyIP_Entry.config(state='normal')        # IP entry: editable
        ProxyPort_Entry.config(state='normal')      # port entry: editable
        ProxyTest_btn.config(state='normal')        # test button: enabled
    else:
        ProxyType_comb.config(state='disabled')     # proxy-type combobox: disabled
        ProxyIP_Entry.config(state='disabled')      # IP entry: disabled
        ProxyPort_Entry.config(state='disabled')    # port entry: disabled
        ProxyTest_btn.config(state='disabled')      # test button: disabled
    return 0
# disable every proxy-related control
def Disable_ProxySetting_All():
    ProxyEnable_Button.config(state='disabled')     # proxy master checkbox: disabled
    ProxyType_comb.config(state='disabled')         # proxy-type combobox: disabled
    ProxyIP_Entry.config(state='disabled')          # IP entry: disabled
    ProxyPort_Entry.config(state='disabled')        # port entry: disabled
    ProxyTest_btn.config(state='disabled')          # test button: disabled
    timeout_label_Entry.config(state='disabled')    # timeout entry: disabled
    RetryNum_label_Entry.config(state='disabled')   # retry-count entry: disabled
    return 0
# restore every proxy-related control
def Reset_ProxySetting_All():
    ProxyEnable_Button.config(state='normal')       # proxy master checkbox: enabled
    timeout_label_Entry.config(state='normal')      # timeout entry: enabled
    RetryNum_label_Entry.config(state='normal')     # retry-count entry: enabled
    Init_ProxySetting_group()
    return 0
def Thread_creat(func):
    '''run func in a daemon thread so the UI stays responsive'''
    t = threading.Thread(target=func)   # create
    t.daemon = True                     # daemon, dies with the main window !!!
    t.start()                           # start
    # do NOT t.join() here -- that would block and freeze the UI
# pull the video code out of one list item
def GetVideoList_Code(VideoNodeData):
    VideoCode = ''
    VideoTitleNode = VideoNodeData.find('div', attrs={'class': 'video-title'})
    if VideoTitleNode != None:
        VideoCode = VideoTitleNode.strong.get_text()
    #print(VideoCode)
    return VideoCode
# pull the release date out of one list item
def GetVideoList_Date(VideoNodeData):
    VideoDate = ''
    VideoDateNode = VideoNodeData.find('div', attrs={'class': 'meta'})
    if VideoDateNode != None:
        VideoDate = VideoDateNode.get_text().strip()
    #print(VideoDate)
    return VideoDate
# pull the detail-page URL out of one list item
def GetVideoList_URL(VideoNodeData):
    VideoURL = ''
    if 'href' in VideoNodeData.a.attrs:
        VideoURL = VideoNodeData.a['href']
    #print(VideoURL)
    return VideoURL
# pull the actor name (plus former names) out of the page header
def GetVideoList_Actor(SoupData):
    #ActorNames = None
    ActorSectionNames = ''
    ActorFormerNameNames = ''
    # value of the 'actor-section-name' node; also works on list pages
    ActorSectionNameNode = SoupData.find('span', attrs={'class': 'actor-section-name'})
    if ActorSectionNameNode == None:   # not found, more likely a search page: section-title title is-4
        ActorSectionNameNode = SoupData.find('h2', attrs={'class': 'section-title title is-4'})
        ActorSectionNames = ActorSectionNameNode.get_text()  #.strip()
        ActorSectionNames = ActorSectionNames.split(' ')[1]
        #print('ActorSectionNames=%s' % ActorSectionNames)
        #os.system('pause')
    else:
        ActorSectionNames = ActorSectionNameNode.get_text()  #.strip()
    # value of the <span class="section-meta"> node holding the actor's former names
    ActorFormerNameNodes = SoupData.find_all('span', attrs={'class': 'section-meta'})
    for ActorFormerNameNode in ActorFormerNameNodes:
        if '部影片' not in ActorFormerNameNode.get_text():
            ActorFormerNameNames = ActorFormerNameNode.get_text()  #.strip()
            break
    ActorSectionName = ActorSectionNames.split(',')
    ActorFormerNameName = ActorFormerNameNames.split(',')
    ActorNames = ActorSectionName + ActorFormerNameName
    if '' in ActorNames:
        ActorNames.remove('')
    for i in range(len(ActorNames)):
        ActorNames[i] = ActorNames[i].strip()
    ActorNames = '&'.join(ActorNames)
    print(ActorNames)
    return ActorNames
# collect the data for every video on an actor page
def GetActorVideoList(SoupData):
    global URL_DOMAIN
    global VideoNumber
    global ActorVideioInfoList
    global ActorName
    if ActorName == '':
        ActorName = GetVideoList_Actor(SoupData)
    VideoListNode = None
    VideoListNode = SoupData.find('div', attrs={'class': 'movie-list h cols-4 vcols-8'})
    if VideoListNode == None:
        VideoListNode = SoupData.find('div', attrs={'class': 'movie-list h cols-4 vcols-5'})
    if VideoListNode == None:
        print('“movie-list h cols-4 vcols-8”或“movie-list h cols-4 vcols-5”列表节点都没找到')
    if VideoListNode != None:
        VideoNodes = VideoListNode.find_all('div', attrs={'class': 'item'})
        for VideoNode in VideoNodes:
            VideoCode = GetVideoList_Code(VideoNode)
            VideoDate = GetVideoList_Date(VideoNode)
            VideoURL = GetVideoList_URL(VideoNode)
            #print(VideoCode, VideoDate, VideoURL)
            # date, actor count, actor list, score, minutes, link, title
            ActorVideioInfoList[VideoCode] = [VideoDate, '', '', '', '', URL_DOMAIN + VideoURL, '']
            Video_list_textbox.insert(END, VideoCode + '\t' + VideoDate + '\n')
            VideoNumber = VideoNumber + 1
    NextPage = SoupData.find('a', attrs={'class': 'pagination-next'})
    if Debug_val.get() == 1:
        return 0
    if NextPage != None:   # there is a next page
        NextURL = NextPage.get('href')
        print('下一页', NextURL)
        GetAvtorPage(URL_DOMAIN + NextURL)
    return 0
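# Note: pagination works by recursing into GetAvtorPage() for every
# 'pagination-next' link, so one call per listing page sits on the call stack.
# LimitPageNum (60) is declared at the top but never enforced here; the crawl
# simply stops when the site stops offering a next page.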
# fetch one actor listing page and hand it to the parser
def GetAvtorPage(Actor_URL):
    global URL_DOMAIN
    soup = None
    print('[%s]' % Actor_URL)
    #return 0
    htmlData = GetHtml(Actor_URL)
    if Debug_val.get() == 1:
        print(htmlData)
        return 0
    if htmlData != None:
        print('htmlData is OK')
        soup = BeautifulSoup(htmlData, features="lxml")
    else:
        print('htmlData is None')
    if soup != None:
        print('soup is OK')
        GetActorVideoList(soup)
        if 0:   # disabled debug branch: dump the page to disk
            if htmlData != None:
                print('写\'%s.html\'' % vedio_code)
                SaveHtmlFile('%s.html' % vedio_code, htmlData)
    else:
        print('soup is None')
    return 0
# create the Excel file
def Creat_Excel():
    #global AllFileList
    #global AllFileList_bak
    global ActorVideioInfoList
    global ActorName
    print('*****************ActorName=[%s]*********************' % ActorName)
    #ActorName = '1111111'
    excelfile = ActorName + '.xlsx'
    # the video code column becomes the index
    df1 = pd.DataFrame.from_dict(ActorVideioInfoList, orient='index',
                                 columns=['日期', '人数', '演员', '评分', '时长', 'URL', '片名'])
    df1.to_excel(excelfile)
    print('保存完成,共%d个' % len(ActorVideioInfoList))
    ActorVideioInfoList.clear()
    return 0
# [打开链接] button handler
def OpenActorURL():
    global URL_DOMAIN
    global ActorVideioInfoList
    global ActorName
    ActorName = ''
    Video_list_textbox.delete(1.0, END)   # clear the list box
    ActorURL = ActorURL_value.get()
    if Debug_val.get() == 1:
        URL_DOMAIN = 'https://javdb008.com/'
    else:
        URL_data = urllib.parse.urlparse(ActorURL)
        URL_DOMAIN = URL_data.scheme + '://' + URL_data.netloc
    print(URL_DOMAIN)
    # before resolving anything, check which Wi-Fi we are on: refuse to crawl on a company AP
    if Check_Wifi_SSID() == 'forbid':
        print('此Wifi被禁止联网解析,更换AP')
        return 0
    GetAvtorPage(ActorURL)
    video_total_num = len(ActorVideioInfoList)
    if video_total_num == 0:
        print('哪里失败了')
        return 0
    print('视频总数%d' % video_total_num)
    progressbar1['maximum'] = video_total_num
    print(ActorVideioInfoList)
    n = 0
    m = 0
    Video_ParseRlt_textbox.delete(1.0, END)   # clear the result box
    for video in ActorVideioInfoList:
        vediocode, actors, score, minute, magnetlink, title = Get_Video_detail('remote', ActorVideioInfoList[video][5])
        ActorVideioInfoList[video][1] = len(actors)
        ActorVideioInfoList[video][2] = '&'.join(actors)
        ActorVideioInfoList[video][3] = score
        ActorVideioInfoList[video][4] = minute
        if magnetlink != '':
            ActorVideioInfoList[video][5] = magnetlink
            Video_ParseRlt_textbox.insert(END, vediocode + ' [√]' + '\n')
            m = m + 1
        else:
            Video_ParseRlt_textbox.insert(END, vediocode + ' [×]' + '\n')
        ActorVideioInfoList[video][6] = title
        n = n + 1
        progressbar1['value'] = n
        print(video, ActorVideioInfoList[video])
    Creat_Excel()
    Video_ParseRlt_textbox.insert(END, '总共%d个,有效%d个' % (video_total_num, m))   # summary line
    return 0
print_Welcomeinfo()
OS = platform.system()
top = Tk()
top.title(versionStr)
top.resizable(False, False)   # fixed window size
top.geometry("480x526")
axis_y = 1
Line_High = 24
# actor-link entry box
ActorURL_value = StringVar()
ActorURL_value.set(ActorLink)
ActorURL_text = Entry(textvariable=ActorURL_value)
ActorURL_text.place(x=1, y=1, width=418, height=Line_High)
# open-link button
OpenActorURL_btn = Button()
OpenActorURL_btn.config(text='打开链接', command=lambda: Thread_creat(OpenActorURL))
OpenActorURL_btn.place(x=420, y=axis_y, width=59, height=Line_High)
axis_y = axis_y + Line_High
Line_High = 24
# "has magnet" filter checkbox
HaveMagnet_val = IntVar()
HaveMagnet_val.set(0)
HaveMagnet_Button = Checkbutton(top, text="有磁力", anchor='w', relief='groove', variable=HaveMagnet_val, onvalue=1, offvalue=0, command=ResetUrlViewParam)
HaveMagnet_Button.place(x=1, y=axis_y, width=479, height=Line_High + 2 * 2)
# "single work only" checkbox
Single_val = IntVar()
Single_val.set(0)
Single_Button = Checkbutton(top, text="单体作品", anchor='w', variable=Single_val, onvalue=1, offvalue=0, command=ResetUrlViewParam)
Single_Button.place(x=72, y=axis_y + 2, width=100, height=Line_High)
# sort order: combobox
SortType_label = Label(text='排序方式:', anchor='w')
SortType_label.place(x=162, y=axis_y + 2, width=60, height=Line_High)
SortType_comb = ttk.Combobox(state="readonly")
SortType_comb.bind('<<ComboboxSelected>>', ResetUrlViewParam_EventBind)
SortType_comb.place(x=222, y=axis_y + 2, width=70, height=Line_High)
SortType_comb['values'] = sortValue
SortType_comb.current(0)
# running-time limit: entry box
TimeLimit_Label = Label(text='片长限制(分钟):', anchor='w')
TimeLimit_Label.place(x=346, y=axis_y + 2, width=85, height=24)
TimeLimit_text = StringVar()
TimeLimit_text.set('180')
TimeLimit_Entry = Entry(textvariable=TimeLimit_text)
TimeLimit_Entry.place(x=438, y=axis_y + 2, width=39, height=Line_High)
axis_y = axis_y + Line_High + 2
Line_High = 24
# proxy master checkbox
ProxyEnable_val = IntVar()
ProxyEnable_val.set(0)
ProxyEnable_Button = Checkbutton(top, text="启用代理", anchor='w', relief='groove', variable=ProxyEnable_val, onvalue=1, offvalue=0, command=Init_ProxySetting_group)
ProxyEnable_Button.place(x=1, y=axis_y, width=479, height=Line_High + 4)
# proxy type: combobox
ProxyType_comb = ttk.Combobox(state="readonly")
ProxyType_comb.place(x=124, y=axis_y + 2, width=70, height=Line_High)
ProxyType_comb['values'] = ('http', 'https', 'socks4', 'socks5')
ProxyType_comb.current(0)
# proxy address: entry box
ProxyIP_text = StringVar()
ProxyIP_text.set('127.0.0.1')
ProxyIP_Entry = Entry(textvariable=ProxyIP_text)
ProxyIP_Entry.place(x=195, y=axis_y + 2, width=174, height=Line_High)
# proxy port
ProxyPort_text = StringVar()
ProxyPort_text.set('7890')
ProxyPort_Entry = Entry(textvariable=ProxyPort_text)
ProxyPort_Entry.place(x=369, y=axis_y + 2, width=50, height=Line_High)
# proxy test button
ProxyTest_btn = Button()
#ProxyTest_btn.config(text='代理测试', command=ProxyTest)
ProxyTest_btn.config(text='代理测试', command=lambda: Thread_creat(ProxyTest))
ProxyTest_btn.place(x=420, y=axis_y + 2, width=59, height=Line_High)
Init_ProxySetting_group()   # initialise the state of the proxy controls
axis_y = axis_y + Line_High + 2
Line_High = 24
# just a border
timeout_label1111 = Label(text='', relief=GROOVE).place(x=1, y=axis_y, width=479, height=Line_High + 4)
# timeout
timeout_label = Label(text='超时时间(秒):', anchor='w')
timeout_label.place(x=2, y=axis_y + 2, width=80, height=24)
timeout_label_text = StringVar()
timeout_label_text.set(2)
timeout_label_Entry = Entry(textvariable=timeout_label_text)
timeout_label_Entry.place(x=80, y=axis_y + 2, width=30, height=24)
# retry count
RetryNum_label = Label(text='重试次数:', anchor='w')
RetryNum_label.place(x=115, y=axis_y + 2, width=60, height=24)
RetryNum_label_text = StringVar()
RetryNum_label_text.set(6)
RetryNum_label_Entry = Entry(textvariable=RetryNum_label_text)
RetryNum_label_Entry.place(x=174, y=axis_y + 2, width=30, height=24)
# debug checkbox
Debug_val = IntVar()
Debug_val.set(0)
Debug_Button = Checkbutton(top, text="Debug", anchor='w', variable=Debug_val, onvalue=1, offvalue=0)
Debug_Button.place(x=410, y=axis_y + 2, width=100, height=24)
axis_y = axis_y + Line_High + 4
Line_High = 400
# video list box (left)
Video_list_textbox = ScrolledText(relief="solid")
Video_list_textbox.place(x=1, y=axis_y, width=200, height=Line_High)
# parse-result box (right)
Video_ParseRlt_textbox = ScrolledText(relief="solid")
Video_ParseRlt_textbox.place(x=204, y=axis_y, width=272, height=Line_High)
axis_y = axis_y + Line_High
Line_High = 20
# progress bar
progressbar1 = Progressbar(top, orient='horizontal', length=250, mode='determinate')
progressbar1.place(x=1, y=axis_y, width=478, height=Line_High)
progressbar1['maximum'] = 250
progressbar1['value'] = 0
top.mainloop()
Update history
V1.06 Added support for search pages.
V1.05 Added handling for the "马儿走丢" case; added the attrs={'class':'movie-list h cols-4 vcols-5'} fallback check.





