ingest.py
"""
Data ingestion: scrape 飞享IM website and build the BM25 retriever.
Run once before starting the Q&A service.
"""
import json

import requests
from bs4 import BeautifulSoup
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.retrievers import BM25Retriever
from langchain_core.documents import Document

from config import (
    DOCS_PERSIST_PATH,
    FSHARECHAT_URLS,
    FSHARECHAT_STATIC_KNOWLEDGE,
    RETRIEVER_K,
)


def scrape_page(url: str) -> str:
    """Fetch and extract clean text from a webpage."""
    try:
        resp = requests.get(url, timeout=10, headers={"User-Agent": "Mozilla/5.0"})
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "html.parser")
        for tag in soup(["script", "style", "nav", "footer"]):
            tag.decompose()
        return soup.get_text(separator="\n", strip=True)
    except Exception as e:
        print(f" Warning: failed to scrape {url}: {e}")
        return ""
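

# Note: scrape_page deliberately swallows errors and returns "" so that a
# single unreachable URL cannot abort the whole ingestion run; load_documents
# below simply skips empty results.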


def load_documents() -> list[Document]:
    """Load documents from website scraping + static knowledge."""
    docs: list[Document] = []
    print("Scraping 飞享IM website...")
    for url in FSHARECHAT_URLS:
        print(f" Fetching {url}")
        text = scrape_page(url)
        if text:
            docs.append(Document(page_content=text, metadata={"source": url}))
    docs.append(Document(
        page_content=FSHARECHAT_STATIC_KNOWLEDGE,
        metadata={"source": "static_knowledge"},
    ))
    print(f"Loaded {len(docs)} documents total")
    return docs


def _chunks_to_json(chunks: list[Document]) -> list[dict]:
    return [{"page_content": d.page_content, "metadata": d.metadata} for d in chunks]


def _json_to_chunks(data: list[dict]) -> list[Document]:
    return [Document(page_content=d["page_content"], metadata=d["metadata"]) for d in data]
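

# Persistence note: BM25Retriever (backed by the rank_bm25 package) holds its
# index in memory, so rather than pickling the retriever we persist the plain
# chunks as JSON and rebuild the index from them on load; rebuilding is cheap
# at this corpus size.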


def build_retriever() -> BM25Retriever:
    """Chunk documents, persist to JSON, and return a BM25Retriever."""
    docs = load_documents()
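    # The separator list mixes paragraph/line breaks with Chinese
    # sentence-ending punctuation (。!?) so that mostly-Chinese text
    # splits on sentence boundaries rather than mid-sentence.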
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=50,
        separators=["\n\n", "\n", "。", "!", "?", " ", ""],
    )
    chunks = splitter.split_documents(docs)
    print(f"Split into {len(chunks)} chunks")
    with open(DOCS_PERSIST_PATH, "w", encoding="utf-8") as f:
        json.dump(_chunks_to_json(chunks), f, ensure_ascii=False, indent=2)
    print(f"Documents saved to {DOCS_PERSIST_PATH}")
    retriever = BM25Retriever.from_documents(chunks, k=RETRIEVER_K)
    return retriever


def load_retriever() -> BM25Retriever:
    """Load persisted chunks from JSON and return a BM25Retriever."""
    with open(DOCS_PERSIST_PATH, "r", encoding="utf-8") as f:
        chunks = _json_to_chunks(json.load(f))
    return BM25Retriever.from_documents(chunks, k=RETRIEVER_K)
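

# Minimal usage sketch (the query string is illustrative, not from the
# project): after running this script once, the Q&A service can do e.g.
#
#     from ingest import load_retriever
#     retriever = load_retriever()
#     for doc in retriever.invoke("如何注册飞享IM账号?"):
#         print(doc.metadata["source"], doc.page_content[:80])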


if __name__ == "__main__":
    build_retriever()
    print("Ingestion complete. Ready to answer questions about 飞享IM.")