Upload files to "jtx260111/api"
329
jtx260111/api/JWDJ.py
Normal file
@@ -0,0 +1,329 @@
#!/usr/bin/python
# coding=utf-8

"""

Author: recommended by Diudiumiao (丢丢喵) 🚓 All content is collected from the Internet and is for study and exchange only. Copyright belongs to the original creators. If your rights have been infringed, please notify the author and the infringing content will be removed promptly.

====================Diudiumiao====================

"""

from Crypto.Util.Padding import pad, unpad
from urllib.parse import unquote
from Crypto.Cipher import ARC4
from urllib.parse import quote
from base.spider import Spider
from Crypto.Cipher import AES
from bs4 import BeautifulSoup
from base64 import b64decode
import urllib.request
import urllib.parse
import datetime
import binascii
import requests
import base64
import json
import time
import sys
import re
import os

sys.path.append('..')

xurl = "https://djw1.com"

headerx = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
}

class Spider(Spider):
    global xurl
    global headerx

    def getName(self):
        return "首页"

    def init(self, extend):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
        if pl == 3:
            plx = []
            while True:
                start_index = text.find(start_str)
                if start_index == -1:
                    break
                end_index = text.find(end_str, start_index + len(start_str))
                if end_index == -1:
                    break
                middle_text = text[start_index + len(start_str):end_index]
                plx.append(middle_text)
                text = text.replace(start_str + middle_text + end_str, '')
            if len(plx) > 0:
                purl = ''
                for i in range(len(plx)):
                    matches = re.findall(start_index1, plx[i])
                    output = ""
                    for match in matches:
                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
                        if match3:
                            number = match3.group(1)
                        else:
                            number = 0
                        if 'http' not in match[0]:
                            output += f"#{match[1]}${number}{xurl}{match[0]}"
                        else:
                            output += f"#{match[1]}${number}{match[0]}"
                    output = output[1:]
                    purl = purl + output + "$$$"
                purl = purl[:-3]
                return purl
            else:
                return ""
        else:
            start_index = text.find(start_str)
            if start_index == -1:
                return ""
            end_index = text.find(end_str, start_index + len(start_str))
            if end_index == -1:
                return ""

            if pl == 0:
                middle_text = text[start_index + len(start_str):end_index]
                return middle_text.replace("\\", "")

            if pl == 1:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    jg = ' '.join(matches)
                    return jg
                return ""

            if pl == 2:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    new_list = [f'{item}' for item in matches]
                    jg = '$$$'.join(new_list)
                    return jg
                return ""

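    # A note on `pl`, inferred from the call sites in this file (not from any
    # upstream docs): it selects the extraction mode.
    #   pl == 0 -> first substring between start_str and end_str, backslashes
    #              stripped, e.g. the text between 'class="info-mark">' and '<'
    #   pl == 1 -> re.findall(start_index1, ...) on that substring, matches joined with spaces
    #   pl == 2 -> same, but matches joined with the '$$$' separator of the CMS result format
    #   pl == 3 -> repeatedly cut start/end pairs out of the text and build
    #              '#name$number url' play strings from tuple-producing regexes
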
    def homeContent(self, filter):
        result = {"class": []}

        detail = requests.get(url=xurl + "/all/", headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text

        doc = BeautifulSoup(res, "lxml")

        soups = doc.find_all('section', class_="container items")

        for soup in soups:
            vods = soup.find_all('li')

            for vod in vods:
                id = vod.find('a')['href']
                name = vod.text.strip()

                result["class"].append({"type_id": id, "type_name": name})

        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, cid, pg, filter, ext):
        videos = []

        if pg:
            page = int(pg)
        else:
            page = 1

        url = f'{cid}page/{page}/'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")

        soups = doc.find_all('section', class_="container items")

        for soup in soups:
            vods = soup.find_all('li')

            for vod in vods:
                name = vod.find('img')['alt']

                ids = vod.find('a', class_="image-line")
                id = ids['href']

                pic = vod.find('img')['src']

                remark = self.extract_middle_text(str(vod), 'class="remarks light">', '<', 0)

                video = {
                    "vod_id": id,
                    "vod_name": name,
                    "vod_pic": pic,
                    "vod_remarks": '▶️' + remark
                }
                videos.append(video)

        result = {'list': videos}
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

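    # The page/pagecount/limit/total values returned above are sentinels: the
    # site exposes no reliable total, so large constants are returned and the
    # client is expected to keep paging until a page comes back empty (an
    # inference from the code, not a documented contract).
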
    def detailContent(self, ids):
        did = ids[0]
        result = {}
        videos = []
        xianlu = ''
        bofang = ''

        if 'http' not in did:
            did = xurl + did

        res = requests.get(url=did, headers=headerx)
        res.encoding = "utf-8"
        res = res.text
        doc = BeautifulSoup(res, "lxml")

        url = 'https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1732707176882/jiduo.txt'
        response = requests.get(url)
        response.encoding = 'utf-8'
        code = response.text
        name = self.extract_middle_text(code, "s1='", "'", 0)
        Jumps = self.extract_middle_text(code, "s2='", "'", 0)

        content = self.extract_middle_text(res, 'class="info-detail">', '<', 0)

        remarks = self.extract_middle_text(res, 'class="info-mark">', '<', 0)

        year = self.extract_middle_text(res, 'class="info-addtime">', '<', 0)

        if name not in content:
            bofang = Jumps
            xianlu = '1'
        else:
            soups = doc.find('div', class_="ep-list-items")

            soup = soups.find_all('a')

            for sou in soup:
                id = sou['href']
                name = sou.text.strip()

                bofang = bofang + name + '$' + id + '#'

            bofang = bofang[:-1]

            xianlu = '专线'

        videos.append({
            "vod_id": did,
            "vod_remarks": remarks,
            "vod_year": year,
            "vod_content": content,
            "vod_play_from": xianlu,
            "vod_play_url": bofang
        })

        result['list'] = videos
        return result

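    # The jiduo.txt fetch in detailContent acts as a remote switch: s1 is a
    # marker string and s2 a replacement play URL. When the marker is absent
    # from the page description, the spider returns s2 instead of the real
    # episode list. This reading is inferred from the code; the remote file's
    # contents are not shown in this commit.
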
    def playerContent(self, flag, id, vipFlags):
        res = requests.get(url=id, headers=headerx)
        res.encoding = "utf-8"
        res = res.text

        url = self.extract_middle_text(res, '"wwm3u8":"', '"', 0).replace('\\', '')

        result = {}
        result["parse"] = 0
        result["playUrl"] = ''
        result["url"] = url
        result["header"] = headerx
        return result

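    # parse = 0 signals that the extracted "wwm3u8" URL is directly playable
    # and needs no web-page sniffing; this follows the common CMS player-result
    # convention rather than anything specific to this site.
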
    def searchContentPage(self, key, quick, pg):
        result = {}
        videos = []

        if pg:
            page = int(pg)
        else:
            page = 1

        url = f'{xurl}/search/{key}/page/{page}/'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")

        soups = doc.find_all('section', class_="container items")

        for soup in soups:
            vods = soup.find_all('li')

            for vod in vods:
                name = vod.find('img')['alt']

                ids = vod.find('a', class_="image-line")
                id = ids['href']

                pic = vod.find('img')['src']

                remark = self.extract_middle_text(str(vod), 'class="remarks light">', '<', 0)

                video = {
                    "vod_id": id,
                    "vod_name": name,
                    "vod_pic": pic,
                    "vod_remarks": '▶️' + remark
                }
                videos.append(video)

        result['list'] = videos
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def searchContent(self, key, quick, pg="1"):
        return self.searchContentPage(key, quick, pg)

    def localProxy(self, params):
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None

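    # proxyM3u8 / proxyMedia / proxyTs are not defined anywhere in this file;
    # localProxy assumes they are inherited from base.spider.Spider (or were
    # meant to be copied in from a sibling spider) and will raise
    # AttributeError if they are missing.
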
1
jtx260111/api/LSYS.py
Normal file
@@ -0,0 +1 @@
[session-b1439fbc] Route error: The Repository has been blocked. see: https://gitee.com/PizazzXS/another-d
220
jtx260111/api/lingdu.py
Normal file
@@ -0,0 +1,220 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import random
import sys
from base64 import b64encode, b64decode
from concurrent.futures import ThreadPoolExecutor

sys.path.append('..')
from base.spider import Spider


class Spider(Spider):

    def init(self, extend=""):
        did = self.getdid()
        self.headers.update({'deviceId': did})
        token = self.gettk()
        self.headers.update({'token': token})

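    # Bootstrap order matters here: the deviceId header is attached before
    # gettk() calls the visitorInfo endpoint with self.headers. `headers`
    # itself is a class attribute defined further down the class body, so it
    # already exists by the time init() runs.
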
    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host = 'http://ldys.sq1005.top'

    headers = {
        'User-Agent': 'okhttp/4.12.0',
        'client': 'app',
        'deviceType': 'Android'
    }

    def homeContent(self, filter):
        data = self.post(f"{self.host}/api/v1/app/screen/screenType", headers=self.headers).json()
        result = {}
        cate = {
            "类型": "classify",
            "地区": "region",
            "年份": "year"
        }
        sort = {
            'key': 'sreecnTypeEnum',  # sic: misspelled, but it matches the key categoryContent sends
            'name': '排序',
            'value': [{'n': '最新', 'v': 'NEWEST'}, {'n': '人气', 'v': 'POPULARITY'},
                      {'n': '评分', 'v': 'COLLECT'}, {'n': '热搜', 'v': 'HOT'}]
        }
        classes = []
        filters = {}
        for k in data['data']:
            classes.append({
                'type_name': k['name'],
                'type_id': k['id']
            })
            filters[k['id']] = []
            for v in k['children']:
                filters[k['id']].append({
                    'name': v['name'],
                    'key': cate[v['name']],
                    'value': [{'n': i['name'], 'v': i['name']} for i in v['children']]
                })
            filters[k['id']].append(sort)
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        jdata = {"condition": 64, "pageNum": 1, "pageSize": 40}
        data = self.post(f"{self.host}/api/v1/app/recommend/recommendSubList", headers=self.headers, json=jdata).json()
        return {'list': self.getlist(data['data']['records'])}

    def categoryContent(self, tid, pg, filter, extend):
        jdata = {
            'condition': {
                'sreecnTypeEnum': 'NEWEST',
                'typeId': tid,
            },
            'pageNum': int(pg),
            'pageSize': 40,
        }
        jdata['condition'].update(extend)
        data = self.post(f"{self.host}/api/v1/app/screen/screenMovie", headers=self.headers, json=jdata).json()
        result = {}
        result['list'] = self.getlist(data['data']['records'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        ids = ids[0].split('@@')
        jdata = {"id": int(ids[0]), "typeId": ids[-1]}
        v = self.post(f"{self.host}/api/v1/app/play/movieDesc", headers=self.headers, json=jdata).json()
        v = v['data']
        vod = {
            'type_name': v.get('classify'),
            'vod_year': v.get('year'),
            'vod_area': v.get('area'),
            'vod_actor': v.get('star'),
            'vod_director': v.get('director'),
            'vod_content': v.get('introduce'),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        c = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
        l = c['data']['moviePlayerList']
        n = {str(i['id']): i['moviePlayerName'] for i in l}
        m = jdata.copy()
        m.update({'playerId': str(l[0]['id'])})
        pd = self.getv(m, c['data']['episodeList'])
        if len(l) > 1:
            with ThreadPoolExecutor(max_workers=len(l) - 1) as executor:
                future_to_player = {executor.submit(self.getd, jdata, player): player for player in l[1:]}
                for future in future_to_player:
                    try:
                        o, p = future.result()
                        pd.update(self.getv(o, p))
                    except Exception as e:
                        print(f"Request failed: {e}")
        w, e = [], []
        for i, x in pd.items():
            if x:
                w.append(n[i])
                e.append(x)
        vod['vod_play_from'] = '$$$'.join(w)
        vod['vod_play_url'] = '$$$'.join(e)
        return {'list': [vod]}

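    # detailContent fans out one movieDetails request per extra player line:
    # the first player's episodes arrive with the initial call, the rest are
    # fetched concurrently via getd() in a ThreadPoolExecutor, and everything
    # is merged into `pd` ({playerId: 'episode$payload#...'}) before being
    # joined with the '$$$' separators of the CMS play-list format.
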
    def searchContent(self, key, quick, pg="1"):
        jdata = {
            "condition": {
                "value": key
            },
            "pageNum": int(pg),
            "pageSize": 40
        }
        data = self.post(f"{self.host}/api/v1/app/search/searchMovie", headers=self.headers, json=jdata).json()
        return {'list': self.getlist(data['data']['records']), 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        jdata = json.loads(self.d64(id))
        data = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
        try:
            params = {'playerUrl': data['data']['url'], 'playerId': jdata['playerId']}
            pd = self.fetch(f"{self.host}/api/v1/app/play/analysisMovieUrl", headers=self.headers, params=params).json()
            url, p = pd['data'], 0
        except Exception as e:
            print(f"Request failed: {e}")
            url, p = data['data']['url'], 0
        return {'parse': p, 'url': url, 'header': {'User-Agent': 'okhttp/4.12.0'}}

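    # The play id round-trips through base64: getv() packs
    # {"id": ..., "typeId": ..., "playerId": ..., "episodeId": ...} with e64(),
    # and playerContent unpacks it with d64() to rebuild the request body, e.g.
    #   self.d64(id) -> '{"id": 1, "typeId": "2", "playerId": "3", "episodeId": "4"}'
    # (illustrative values only).
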
    def localProxy(self, param):
        pass

    def liveContent(self, url):
        pass

    def gettk(self):
        data = self.fetch(f"{self.host}/api/v1/app/user/visitorInfo", headers=self.headers).json()
        return data['data']['token']

    def getdid(self):
        did = self.getCache('ldid')
        if not did:
            hex_chars = '0123456789abcdef'
            did = ''.join(random.choice(hex_chars) for _ in range(16))
            self.setCache('ldid', did)
        return did

    def getd(self, jdata, player):
        x = jdata.copy()
        x.update({'playerId': str(player['id'])})
        response = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=x).json()
        return x, response['data']['episodeList']

    def getv(self, d, c):
        f = {d['playerId']: ''}
        g = []
        for i in c:
            j = d.copy()
            j.update({'episodeId': str(i['id'])})
            g.append(f"{i['episode']}${self.e64(json.dumps(j))}")
        f[d['playerId']] = '#'.join(g)
        return f

    def getlist(self, data):
        videos = []
        for i in data:
            videos.append({
                'vod_id': f"{i['id']}@@{i['typeId']}",
                'vod_name': i.get('name'),
                'vod_pic': i.get('cover'),
                'vod_year': i.get('year'),
                'vod_remarks': i.get('totalEpisode')
            })
        return videos

    def e64(self, text):
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64 encode error: {str(e)}")
            return ""

    def d64(self, encoded_text):
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64 decode error: {str(e)}")
            return ""
2
jtx260111/api/node-rsa.js
Normal file
File diff suppressed because one or more lines are too long
2
jtx260111/api/pako.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long