refactor: reorganize directory structure
- src/ holds source code
- docs/ holds documentation
- scripts/ holds utility scripts
- remove temporary files
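A sketch of the resulting layout (directory roles from the list above; the two
scripts are the ones added in this commit, any other file placement is assumed):

    project/
    ├── src/       # source code
    ├── docs/      # documentation
    └── scripts/   # utility scripts (generate_icons.py, mcp.py)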
scripts/generate_icons.py (new file, 47 lines)
@@ -0,0 +1,47 @@
#!/usr/bin/env python3
"""Generate simple extension icons."""
import struct
import zlib


def create_png(size, color=(102, 126, 234)):
    """Create a simple solid-color PNG icon."""
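
    # PNG chunk framing: a 4-byte big-endian length, a 4-byte chunk type, the
    # payload, then a CRC-32 computed over type + payload; the chunk() helper
    # below assembles exactly that.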
    def chunk(chunk_type, data):
        return (struct.pack('>I', len(data)) + chunk_type + data
                + struct.pack('>I', zlib.crc32(chunk_type + data) & 0xffffffff))

    # PNG signature
    signature = b'\x89PNG\r\n\x1a\n'

    # IHDR chunk
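    # Width, height, bit depth 8, color type 2 (truecolor RGB), then default
    # compression method, default filter method, and no interlacing.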
    ihdr_data = struct.pack('>IIBBBBB', size, size, 8, 2, 0, 0, 0)
    ihdr = chunk(b'IHDR', ihdr_data)

    # IDAT chunk (raw image data)
    raw_data = b''
    for y in range(size):
        raw_data += b'\x00'  # filter byte
        for x in range(size):
            # Rounded-corner effect (coordinates computed but currently unused)
            cx, cy = size / 2, size / 2
            radius = size * 0.4
            corner_radius = size * 0.15

            # Simplified: solid-color fill
            raw_data += bytes(color)

    compressed = zlib.compress(raw_data, 9)
    idat = chunk(b'IDAT', compressed)

    # IEND chunk
    iend = chunk(b'IEND', b'')

    return signature + ihdr + idat + iend


# Generate icons at the different sizes
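# Note: the icons/ directory must already exist; open() below will not create it.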
for size in [16, 48, 128]:
    png_data = create_png(size)
    with open(f'icons/icon{size}.png', 'wb') as f:
        f.write(png_data)
    print(f'Generated icons/icon{size}.png')

print('Done!')
scripts/mcp.py (new file, 91 lines)
@@ -0,0 +1,91 @@
import requests
from bs4 import BeautifulSoup, Tag
import json

url = "https://developers.weixin.qq.com/miniprogram/dev/framework/search/seo.html"

headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/121.0.0.0 Safari/537.36"
    )
}


def fetch_html(url):
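    # Fetch the page and decode it with the encoding detected from the response
    # body (apparent_encoding), so the Chinese text comes through correctly.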
    resp = requests.get(url, headers=headers, timeout=10)
    resp.raise_for_status()
    resp.encoding = resp.apparent_encoding
    return resp.text


def extract_structured_content(html):
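    # Walks the main content area and returns a flat list of dicts, e.g.
    #   {"type": "heading", "level": 2, "content": "..."}
    #   {"type": "paragraph", "content": "..."}
    #   {"type": "list", "ordered": False, "items": ["...", "..."]}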
    soup = BeautifulSoup(html, "html.parser")

    # Candidate containers for the main content
    main = soup.select_one("#docContent, .content, #page-content, .page-content")
    if not main:
        main = soup.body  # last-resort fallback

    data = []

    for el in main.descendants:
        # Skip plain text nodes; only element tags are classified below.
        if not isinstance(el, Tag):
            continue

        if el.name in ["h1", "h2", "h3", "h4"]:
            data.append({
                "type": "heading",
                "level": int(el.name[-1]),
                "content": el.get_text(strip=True)
            })

        elif el.name == "p":
            txt = el.get_text(" ", strip=True)
            if txt:
                data.append({
                    "type": "paragraph",
                    "content": txt
                })

        elif el.name == "pre":
            code = el.get_text("\n", strip=False)
            if code:
                data.append({
                    "type": "code",
                    "lang": el.get("lang") or el.get("data-lang") or "text",
                    "content": code
                })

        elif el.name == "table":
            rows = []
            for tr in el.select("tr"):
                cols = [td.get_text(" ", strip=True) for td in tr.select("th,td")]
                rows.append(cols)

            data.append({
                "type": "table",
                "rows": rows
            })

        elif el.name in ["ul", "ol"]:
            items = [
                li.get_text(" ", strip=True)
                for li in el.select("li")
            ]
            if items:
                data.append({
                    "type": "list",
                    "ordered": el.name == "ol",
                    "items": items
                })

    return data


if __name__ == "__main__":
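    # Running the script directly writes wechat_dev_seo_structured.json to the
    # current directory; ensure_ascii=False keeps the Chinese text readable.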
    html = fetch_html(url)
    structured = extract_structured_content(html)

    with open("wechat_dev_seo_structured.json", "w", encoding="utf-8") as f:
        json.dump(structured, f, ensure_ascii=False, indent=2)

    print("Structured content written to wechat_dev_seo_structured.json")