python爬虫爬取某网站视频的示例代码

把获取到的下载视频的url存放在数组中(也可写入文件中),通过调用迅雷接口,进行自动下载。(请先下载迅雷,并在其设置中心的下载管理中设置为一键下载)

实现代码如下:

from bs4 import BeautifulSoup
import requests
import os, re, time
import urllib3
from win32com.client import Dispatch


class DownloadVideo:
    """Scrape short-video pages from k58.com and queue each video's direct
    download URL in Thunder (Xunlei) via its COM automation interface.

    Requires Thunder to be installed and configured for one-click download
    (see the printed hint in thunder_download).
    """

    def __init__(self):
        # One shared session so cookies/keep-alive carry across requests.
        self.r = requests.session()
        self.url = self.get_url()
        self.download_urla = []  # URLs of the individual video detail pages
        self.download_urlb = []  # direct download URLs extracted from those pages
        # First listing page; further pages are appended by url_set1().
        self.url_set = ["%s/shipin/list-短视频.html" % self.url]

    def get_url(self):
        """Return the site's current base URL by following its redirect.

        The domain changes frequently, so the final URL after redirects is
        used as the base for all subsequent requests.
        """
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        resp = self.r.get('https://www.k58.com', verify=False)
        return resp.url

    def url_set1(self, n):
        """Append listing-page URLs for pages 2..n to self.url_set.

        For n < 2 nothing is added (page 1 is already present from __init__).
        """
        # range(2, n + 1) is empty when n < 2, which also covers the
        # original code's separate n == 2 branch.
        for i in range(2, n + 1):
            self.url_set.append("%s/shipin/list-短视频-%d.html" % (self.url, i))

    def download_url1(self):
        """Collect the per-video page URLs from every listing page."""
        for page_url in self.url_set:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            r = self.r.get(page_url, verify=False)
            soup = BeautifulSoup(r.content, "html.parser")
            for item in soup.find_all(class_="shown"):
                # BUGFIX: the original regex contained
                # ' rel="external nofollow" ' — boilerplate injected by the
                # article host into the code listing — so it could never
                # match the real page. Extract the first <a>'s href instead.
                link = item.find("a")
                if link is not None and link.get("href"):
                    self.download_urla.append(self.url + link["href"])

    def download_url2(self):
        """Visit each video page and collect its direct download URL."""
        for page_url in self.download_urla:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            r = self.r.get(page_url, verify=False)
            soup = BeautifulSoup(r.content, "html.parser")
            # The copy-to-clipboard button carries the download URL in a
            # data attribute.
            buttons = soup.find_all(
                class_="form-control input-sm copy_btn app_disable")
            for btn in buttons:
                url2 = btn["data-clipboard-text"]
                self.download_urlb.append(url2)
                # Optionally also persist the link to a txt file:
                # self.write_txt(url2)

    def thunder_download(self):
        """Push every collected URL into Thunder as a download task.

        Fails gracefully (with a hint) when the Thunder COM server is not
        registered, i.e. Thunder is not installed.
        """
        try:
            thunder = Dispatch("ThunderAgent.Agent64.1")
            for url in self.download_urlb:
                thunder.AddTask(url)
                thunder.CommitTasks()
                # Brief pause so Thunder can accept each task before the next.
                time.sleep(2)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still interrupts.
            print("请下载迅雷,并在其设置中心的下载管理中设置为一键下载")

    def mkdir(self, path):
        """Create `path` (including parents) if it does not already exist."""
        os.makedirs(path, exist_ok=True)

    def write_txt(self, c):
        """Append one download URL per line to a timestamped file in D:/AAAAA."""
        self.mkdir(r"D:/AAAAA")
        file_name = time.strftime('%Y%m%d_%H%M%S.txt')
        with open(r"D:/AAAAA/%s" % file_name, 'a', encoding='utf-8') as f:
            # BUGFIX: original wrote "/n" (a literal slash-n), not a newline.
            f.write(c + "\n")


if __name__ == '__main__':
    d = DownloadVideo()
    # The argument is the number of listing pages to scrape.
    d.url_set1(5)
    d.download_url1()
    d.download_url2()
    d.thunder_download()

到此这篇关于python爬虫爬取某网站视频的示例代码的文章就介绍到这了。更多相关python爬虫爬取网站视频的内容,请搜索以前的文章或继续浏览下面的相关文章,希望大家以后多多支持!

相关文章

发表新评论