# -*- coding: utf-8 -*-
# This resource was collected from publicly available internet channels and is
# intended solely for personal study and crawler-technique exchange.  Any
# commercial use is strictly forbidden; delete within 24 hours of downloading.
# All search results come from the origin site; the author assumes no
# responsibility for them.
"""
Example host-app configuration for this spider:

{
    "key": "xxx",
    "name": "xxx",
    "type": 3,
    "api": "./ApptoV5无加密.py",
    "ext": "http://domain.com"
}
"""

import re
import sys
import uuid

# BUGFIX: the path must be extended *before* importing base.spider, otherwise
# the append is useless when the module is loaded from a different cwd.
sys.path.append('..')
from base.spider import Spider


class Spider(Spider):
    """Spider for an "AppTo V5" style video-app API (unencrypted variant)."""

    # Class-level fallbacks; init() assigns real per-instance values.
    host, config, local_uuid, parsing_config = '', '', '', []
    headers = {
        'User-Agent': "Dart/2.19 (dart:io)",
        'Accept-Encoding': "gzip",
        # Placeholder only -- init() replaces this with a fresh uuid4.
        'appto-local-uuid': local_uuid
    }

    def init(self, extend=''):
        """Resolve the API domain, mint a session uuid and cache the remote
        configuration (home categories + URL-parsing providers).

        ``extend`` is either the API base URL itself, or the address of a JSON
        document whose ``domain`` field holds the real base URL.
        Returns ``{}`` on any failure so the host app degrades gracefully.
        """
        try:
            host = extend.strip()
            if not host.startswith('http'):
                return {}
            # A bare "scheme://host[:port]/" is used directly; anything with a
            # path is treated as a redirect JSON: {"domain": "..."}.
            if not re.match(r'^https?://[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*(:\d+)?/?$', host):
                self.host = self.fetch(host).json()['domain']
            else:
                self.host = host
            self.local_uuid = str(uuid.uuid4())
            # BUGFIX: the class-level headers dict captured local_uuid while it
            # was still '' -- give this instance its own copy carrying the
            # freshly generated uuid so the server actually sees it.
            self.headers = dict(self.headers)
            self.headers['appto-local-uuid'] = self.local_uuid
            response = self.fetch(
                f'{self.host}/apptov5/v1/config/get?p=android&__platform=android',
                headers=self.headers).json()
            config = response['data']
            self.config = config
            # Map each parsing-provider key -> labels of its "json" configs;
            # providers with an empty config list are skipped entirely.
            parsing_config = {}
            for provider in config['get_parsing']['lists']:
                if provider['config']:
                    labels = [c['label'] for c in provider['config'] if c['type'] == 'json']
                    parsing_config[provider['key']] = labels
            self.parsing_config = parsing_config
            return None
        except Exception as e:
            print(f'初始化异常:{e}')
            return {}

    def detailContent(self, ids):
        """Fetch one vod's detail and flatten its play sources into the
        ``show$$$show`` / ``ep$from@url#ep...$$$...`` strings the host expects.
        """
        response = self.fetch(f"{self.host}/apptov5/v1/vod/getVod?id={ids[0]}",
                              headers=self.headers).json()
        data3 = response['data']
        froms = []
        urls = []
        for source in data3['vod_play_list']:
            episodes = [f"{ep['name']}${source['player_info']['from']}@{ep['url']}"
                        for ep in source['urls']]
            froms.append(source['player_info']['show'])
            urls.append('#'.join(episodes))
        # BUGFIX: joining avoids the old rstrip('$$$') which strips a *set* of
        # characters and could eat legitimate trailing '$' from the payload.
        videos = [{
            'vod_id': data3.get('vod_id'),
            'vod_name': data3.get('vod_name'),
            'vod_content': data3.get('vod_content'),
            'vod_remarks': data3.get('vod_remarks'),
            'vod_director': data3.get('vod_director'),
            'vod_actor': data3.get('vod_actor'),
            'vod_year': data3.get('vod_year'),
            'vod_area': data3.get('vod_area'),
            'vod_play_from': '$$$'.join(froms),
            'vod_play_url': '$$$'.join(urls)
        }]
        return {'list': videos}

    def searchContent(self, key, quick, pg='1'):
        """Keyword search; rewrites the site's ``mac://`` poster scheme."""
        url = f"{self.host}/apptov5/v1/search/lists?wd={key}&page={pg}&type=&__platform=android"
        response = self.fetch(url, headers=self.headers).json()
        data = response['data']['data']
        for item in data:
            # BUGFIX: vod_pic may be absent/None -- the old .startswith() call
            # on a bare .get() would raise AttributeError.
            pic = item.get('vod_pic') or ''
            if pic.startswith('mac://'):
                item['vod_pic'] = pic.replace('mac://', 'http://', 1)
        return {'list': data, 'page': pg, 'total': response['data']['total']}

    def playerContent(self, flag, id, vipflags):
        """Resolve a play id of the form ``from@rawurl``.

        Tries every parsing label configured for ``from``; on first success
        returns the direct url (parse=0), otherwise falls back to webview
        parsing (parse=1) of the raw url.
        """
        default_ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
        parsing_config = self.parsing_config
        # BUGFIX: split only on the first '@' so raw urls that themselves
        # contain '@' (userinfo, query args) still parse.
        parts = id.split('@', 1)
        if len(parts) != 2:
            return {'parse': 0, 'url': id, 'header': {'User-Agent': default_ua}}
        playfrom, rawurl = parts
        label_list = parsing_config.get(playfrom)
        if not label_list:
            return {'parse': 0, 'url': rawurl, 'header': {'User-Agent': default_ua}}
        result = {'parse': 1, 'url': rawurl, 'header': {'User-Agent': default_ua}}
        for label in label_list:
            payload = {
                'play_url': rawurl,
                'label': label,
                'key': playfrom
            }
            try:
                response = self.post(
                    f"{self.host}/apptov5/v1/parsing/proxy?__platform=android",
                    data=payload,
                    headers=self.headers
                ).json()
            except Exception as e:
                print(f"请求异常: {e}")
                continue
            if not isinstance(response, dict):
                continue
            if response.get('code') == 422:  # provider rejected this url
                continue
            data = response.get('data')
            if not isinstance(data, dict):
                continue
            url = data.get('url')
            if not url:
                continue
            ua = data.get('UA') or data.get('UserAgent') or default_ua
            result = {
                'parse': 0,
                'url': url,
                'header': {'User-Agent': ua}
            }
            break
        return result

    def homeContent(self, filter):
        """Build the category list from the cached config; only categories
        whose ``extend`` field is a dict are exposed (matches upstream app)."""
        config = self.config
        if not config:
            return {}
        home_cate = config['get_home_cate']
        classes = []
        for cate in home_cate:
            if isinstance(cate.get('extend', []), dict):
                classes.append({'type_id': cate['cate'], 'type_name': cate['title']})
        return {'class': classes}

    def homeVideoContent(self):
        """Fetch the home-page recommendation sections as one flat vod list."""
        response = self.fetch(
            f'{self.host}/apptov5/v1/home/data?id=1&mold=1&__platform=android',
            headers=self.headers).json()
        data = response['data']
        vod_list = []
        for section in data['sections']:
            for item in section['items']:
                # BUGFIX: guard against a missing poster before .startswith().
                vod_pic = item.get('vod_pic') or ''
                if vod_pic.startswith('mac://'):
                    vod_pic = vod_pic.replace('mac://', 'http://', 1)
                vod_list.append({
                    "vod_id": item.get('vod_id'),
                    "vod_name": item.get('vod_name'),
                    "vod_pic": vod_pic,
                    "vod_remarks": item.get('vod_remarks')
                })
        return {'list': vod_list}

    def categoryContent(self, tid, pg, filter, extend):
        """Paged category listing with the filter values the UI selected."""
        response = self.fetch(
            f"{self.host}/apptov5/v1/vod/lists?area={extend.get('area', '')}"
            f"&lang={extend.get('lang', '')}&year={extend.get('year', '')}"
            f"&order={extend.get('sort', 'time')}&type_id={tid}&type_name="
            f"&page={pg}&pageSize=21&__platform=android",
            headers=self.headers).json()
        data = response['data']
        items = data['data']
        for item in items:
            if (item.get('vod_pic') or '').startswith('mac://'):
                item['vod_pic'] = item['vod_pic'].replace('mac://', 'http://', 1)
        return {'list': items, 'page': pg, 'total': data['total']}

    # --- unused framework hooks -------------------------------------------
    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def localProxy(self, param):
        pass
# ===========================================================================
# File: jtx260110/py/jieyingshi.py
# ===========================================================================
# -*- coding: utf-8 -*-
# @Author : Doubebly
# @Time : 2025/1/21 23:07

import hashlib
import re
import sys
import time
import requests
sys.path.append('..')
from base.spider import Spider


class Spider(Spider):
    """Spider for hkybqufgh.com ("JieYingShi"), a mw-movie based site."""

    def getName(self):
        return "JieYingShi"

    def init(self, extend):
        self.home_url = 'https://www.hkybqufgh.com'
        # Fallback stream shown when the real play url cannot be resolved.
        self.error_url = 'https://json.doube.eu.org/error/4gtv/index.m3u8'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
        }

    def getDependence(self):
        return []

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def homeContent(self, filter):
        """Static category list -- the site's type ids are fixed."""
        return {'class': [
            {'type_id': '1', 'type_name': '电影'},
            {'type_id': '2', 'type_name': '电视剧'},
            {'type_id': '4', 'type_name': '动漫'},
            {'type_id': '3', 'type_name': '综艺'},
        ]}

    def homeVideoContent(self):
        return {'list': self.get_data(self.home_url), 'parse': 0, 'jx': 0}

    def categoryContent(self, cid, page, filter, ext):
        url = self.home_url + f'/vod/show/id/{cid}/page/{page}'
        return {'list': self.get_data(url), 'parse': 0, 'jx': 0}

    def detailContent(self, did):
        return {"list": self.get_detail_data(did[0]), 'parse': 0, 'jx': 0}

    def searchContent(self, key, quick, page='1'):
        # The site's search endpoint is not paged; report later pages empty.
        if int(page) > 1:
            return {'list': [], 'parse': 0, 'jx': 0}
        url = self.home_url + f'/vod/search/{key}'
        return {'list': self.get_data(url), 'parse': 0, 'jx': 0}

    def playerContent(self, flag, pid, vipFlags):
        url = self.get_play_data(pid)
        return {"url": url, "header": self.headers, "parse": 1, "jx": 0}

    def localProxy(self, params):
        pass

    def destroy(self):
        return '正在Destroy'

    def get_data(self, url):
        """Scrape vod cards out of the escaped-JSON blob embedded in the page.

        Returns a (possibly empty) list of {vod_id, vod_name, vod_pic,
        vod_remarks} dicts; never raises on network failure.
        """
        data = []
        try:
            # BUGFIX: a timeout so a stalled server cannot hang the host app;
            # requests.Timeout is a RequestException and is caught below.
            res = requests.get(url, headers=self.headers, timeout=10)
            if res.status_code != 200:
                return data
            vod_id_s = re.findall(r'\\"vodId\\":(.*?),', res.text)
            vod_name_s = re.findall(r'\\"vodName\\":\\"(.*?)\\"', res.text)
            vod_pic_s = re.findall(r'\\"vodPic\\":\\"(.*?)\\"', res.text)
            vod_remarks_s = re.findall(r'\\"vodRemarks\\":\\"(.*?)\\"', res.text)
            # BUGFIX: zip keeps the four parallel lists in lock-step instead of
            # indexing by len(vod_id_s), which raised IndexError on mismatch.
            for vid, name, pic, remark in zip(vod_id_s, vod_name_s, vod_pic_s, vod_remarks_s):
                data.append({
                    'vod_id': vid,
                    'vod_name': name,
                    'vod_pic': pic,
                    'vod_remarks': remark,
                })
        except requests.RequestException as e:
            print(e)
        return data

    def get_detail_data(self, ids):
        """Fetch signed detail JSON for vod *ids*; returns [detail] or []."""
        url = self.home_url + f'/api/mw-movie/anonymous/video/detail?id={ids}'
        t = str(int(time.time() * 1000))
        headers = self.get_headers(t, f'id={ids}&key=cb808529bae6b6be45ecfab29a4889bc&t={t}')
        try:
            res = requests.get(url, headers=headers, timeout=10)
            if res.status_code != 200:
                return []
            info = res.json()['data']
            # Episode play ids are "<vod id>-<episode nid>".
            urls = [f"{ep['name']}${ids}-{ep['nid']}" for ep in info['episodeList']]
            data = {
                'type_name': info['vodClass'],
                'vod_id': info['vodId'],
                'vod_name': info['vodName'],
                'vod_remarks': info['vodRemarks'],
                'vod_year': info['vodYear'],
                'vod_area': info['vodArea'],
                'vod_actor': info['vodActor'],
                'vod_director': info['vodDirector'],
                'vod_content': info['vodContent'],
                'vod_play_from': '🌈七星专享',
                'vod_play_url': '#'.join(urls),
            }
            return [data]
        except requests.RequestException as e:
            print(e)
            return []

    def get_play_data(self, play):
        """Resolve an "id-nid" pair to a stream url, or the error stream."""
        # BUGFIX: split only once so an nid containing '-' survives intact.
        _id, _, _pid = play.partition('-')
        url = self.home_url + f'/api/mw-movie/anonymous/v2/video/episode/url?id={_id}&nid={_pid}'
        t = str(int(time.time() * 1000))
        headers = self.get_headers(t, f'id={_id}&nid={_pid}&key=cb808529bae6b6be45ecfab29a4889bc&t={t}')
        try:
            res = requests.get(url, headers=headers, timeout=10)
            if res.status_code != 200:
                return self.error_url
            return res.json()['data']['list'][0]['url']
        except requests.RequestException as e:
            print(e)
            return self.error_url

    @staticmethod
    def get_headers(t, e):
        """Build request headers carrying the site's sha1(md5(query)) sign."""
        sign = hashlib.sha1(hashlib.md5(e.encode()).hexdigest().encode()).hexdigest()
        return {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'sign': sign,
            'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
            't': t,
            'referer': 'https://www.hkybqufgh.com/',
        }


if __name__ == '__main__':
    pass


# ===========================================================================
# File: jtx260110/py/哔哩直播.py
# ===========================================================================
# coding=utf-8
# !/usr/bin/python

"""
Author: Diudiumiao.  All content is collected from the public internet and is
for study/exchange only; copyright remains with the original creators.  If
your rights are infringed, notify the author and it will be removed.
"""

from Crypto.Util.Padding import unpad
from Crypto.Util.Padding import pad
from urllib.parse import unquote
from Crypto.Cipher import ARC4
from urllib.parse import quote
from base.spider import Spider
from Crypto.Cipher import AES
from datetime import datetime
from bs4 import BeautifulSoup
from base64 import b64decode
import urllib.request
import urllib.parse
import datetime
import binascii
import requests
import base64
import json
import time
import sys
import re
import os

sys.path.append('..')

xurl = "https://search.bilibili.com"

xurl1 = "https://api.live.bilibili.com"

headerx = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0'
}


class Spider(Spider):
    """Bilibili live-streaming spider (search-driven category browsing)."""

    global xurl
    global xurl1
    global headerx

    def getName(self):
        return "首页"

    def init(self, extend):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
        """Generic substring / regex extractor shared by this spider family.

        pl==0: first substring between start_str and end_str (backslashes
               stripped);
        pl==1: regex matches within that substring joined by spaces;
        pl==2: regex matches joined by '$$$';
        pl==3: repeatedly cut out start..end spans and build play-url strings
               "name$number<url>" joined by '#', spans joined by '$$$'.
        Returns '' (pl 0/3) or None (pl 1/2) when nothing matches.
        """
        if pl == 3:
            plx = []
            while True:
                start_index = text.find(start_str)
                if start_index == -1:
                    break
                end_index = text.find(end_str, start_index + len(start_str))
                if end_index == -1:
                    break
                middle_text = text[start_index + len(start_str):end_index]
                plx.append(middle_text)
                # Remove the consumed span so find() advances next iteration.
                text = text.replace(start_str + middle_text + end_str, '')
            if len(plx) > 0:
                purl = ''
                for i in range(len(plx)):
                    matches = re.findall(start_index1, plx[i])
                    output = ""
                    for match in matches:
                        # Pull the first standalone number out of the label.
                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
                        if match3:
                            number = match3.group(1)
                        else:
                            number = 0
                        if 'http' not in match[0]:
                            output += f"#{match[1]}${number}{xurl}{match[0]}"
                        else:
                            output += f"#{match[1]}${number}{match[0]}"
                    output = output[1:]  # drop the leading '#'
                    purl = purl + output + "$$$"
                purl = purl[:-3]  # drop the trailing '$$$'
                return purl
            else:
                return ""
        else:
            start_index = text.find(start_str)
            if start_index == -1:
                return ""
            end_index = text.find(end_str, start_index + len(start_str))
            if end_index == -1:
                return ""

            if pl == 0:
                middle_text = text[start_index + len(start_str):end_index]
                return middle_text.replace("\\", "")

            if pl == 1:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    jg = ' '.join(matches)
                    return jg

            if pl == 2:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    new_list = [f'{item}' for item in matches]
                    jg = '$$$'.join(new_list)
                    return jg

    def homeContent(self, filter):
        """Categories are simply search keywords fed to Bilibili live search."""
        result = {"class": [{"type_id": "舞", "type_name": "舞蹈"},
                            {"type_id": "音乐", "type_name": "音乐"},
                            {"type_id": "手游", "type_name": "手游"},
                            {"type_id": "网游", "type_name": "网游"},
                            {"type_id": "单机游戏", "type_name": "单机游戏"},
                            {"type_id": "虚拟主播", "type_name": "虚拟主播"},
                            {"type_id": "电台", "type_name": "电台"},
                            {"type_id": "体育", "type_name": "体育"},
                            {"type_id": "聊天", "type_name": "聊天"},
                            {"type_id": "娱乐", "type_name": "娱乐"},
                            {"type_id": "电影", "type_name": "影视"},
                            {"type_id": "新闻", "type_name": "新闻"}]
                  }
        return result

    def homeVideoContent(self):
        pass

    def _scrape_live_cards(self, keyword, page):
        """Scrape one page of live-room cards for *keyword*.

        Shared by categoryContent and searchContentPage, which previously
        duplicated this code verbatim.  Returns a list of vod dicts.
        """
        videos = []
        url = f'{xurl}/live?keyword={keyword}&page={str(page)}'
        detail = requests.get(url=url, headers=headerx, timeout=10)
        detail.encoding = "utf-8"
        doc = BeautifulSoup(detail.text, "lxml")

        for vod in doc.find_all('div', class_="video-list-item"):
            names = vod.find('h3', class_="bili-live-card__info--tit")
            name = names.text.strip().replace('直播中', '')

            # Room id lives in the card link: bilibili.com/<id>?...
            id = names.find('a')['href']
            id = self.extract_middle_text(id, 'bilibili.com/', '?', 0)

            pic = vod.find('img')['src']
            if 'http' not in pic:
                pic = "https:" + pic

            remarks = vod.find('a', class_="bili-live-card__info--uname")
            remark = remarks.text.strip()

            videos.append({
                "vod_id": id,
                "vod_name": name,
                "vod_pic": pic,
                "vod_remarks": remark
            })
        return videos

    def categoryContent(self, cid, pg, filter, ext):
        page = int(pg) if pg else 1
        result = {'list': self._scrape_live_cards(cid, page)}
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Resolve a room id to its numbered stream lines via the live API."""
        did = ids[0]
        result = {}
        videos = []
        xianlu = ''
        bofang = ''

        url = f'{xurl1}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={did}&platform=web&protocol=0,1&format=0,1,2&codec=0,1'
        detail = requests.get(url=url, headers=headerx, timeout=10)
        detail.encoding = "utf-8"
        data = detail.json()

        content = '欢迎观看哔哩直播'

        setup = data['data']['playurl_info']['playurl']['stream']

        nam = 0
        for vod in setup:
            # NOTE(review): `nam` serves both as line counter and per-stream
            # format index, so stream i reads format[i]; presumably each
            # stream entry carries enough formats -- confirm against the API.
            # BUGFIX: base_url/extra lookups were outside the try and could
            # raise uncaught on a short format list; guard all three together.
            try:
                host = vod['format'][nam]['codec'][0]['url_info'][1]['host']
                base = vod['format'][nam]['codec'][0]['base_url']
                extra = vod['format'][nam]['codec'][0]['url_info'][1]['extra']
            except (KeyError, IndexError):
                continue

            id = host + base + extra

            nam = nam + 1
            namc = f"{nam}号线路"
            bofang = bofang + namc + '$' + id + '#'

        bofang = bofang[:-1]  # drop trailing '#'
        xianlu = '哔哩专线'

        videos.append({
            "vod_id": did,
            "vod_content": content,
            "vod_play_from": xianlu,
            "vod_play_url": bofang
        })

        result['list'] = videos
        return result

    def playerContent(self, flag, id, vipFlags):
        # Stream url is already direct; no webview parsing needed.
        result = {}
        result["parse"] = 0
        result["playUrl"] = ''
        result["url"] = id
        result["header"] = headerx
        return result

    def searchContentPage(self, key, quick, pg):
        page = int(pg) if pg else 1
        result = {'list': self._scrape_live_cards(key, page)}
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def searchContent(self, key, quick, pg="1"):
        # NOTE(review): always requests page '1' regardless of pg -- looks
        # deliberate (search is first-page only); confirm before "fixing".
        return self.searchContentPage(key, quick, '1')

    def localProxy(self, params):
        # Dispatch to proxy helpers -- assumed provided by the base Spider.
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None