collector.go

package webcollector

import (
	"context"
	"encoding/json"
	"log"
	"net/url"
	"regexp"
	"strings"
	"sync/atomic"
	"time"

	"github.com/redis/go-redis/v9"
	"spider/internal/crawler"
	"spider/internal/extractor"
	"spider/internal/plugin"
	proxypool "spider/internal/proxy"
	"spider/internal/search"
)

const snapshotKey = "spider:webcollector:snapshot"
// Collector implements plugin.Collector for web-based merchant collection.
type Collector struct {
	serper       *search.SerperClient
	static       *crawler.StaticCrawler
	dynamic      *crawler.DynamicCrawler
	tmeValidator *crawler.TMeValidator
	stopped      atomic.Bool
	logger       plugin.TaskLogger
	proxyPool    *proxypool.Pool
	rdb          *redis.Client
}
func New(serper *search.SerperClient, rdb *redis.Client) *Collector {
	return &Collector{
		serper:       serper,
		static:       crawler.NewStaticCrawler(),
		dynamic:      crawler.NewDynamicCrawler(),
		tmeValidator: crawler.NewTMeValidator(),
		logger:       plugin.NopLogger(),
		rdb:          rdb,
	}
}
func (c *Collector) Name() string { return "web_collector" }

func (c *Collector) SetLogger(l plugin.TaskLogger) { c.logger = l }

func (c *Collector) Stop() error {
	c.stopped.Store(true)
	return nil
}
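
// Example wiring (an illustrative sketch only: how the serper client, the
// logger and the callback are built lives outside this package and is assumed
// here):
//
//	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
//	c := webcollector.New(serperClient, rdb) // serperClient: a *search.SerperClient built elsewhere
//	c.SetLogger(taskLogger)                  // taskLogger: any plugin.TaskLogger implementation
//	_ = c.Run(ctx, cfg, func(md plugin.MerchantData) {
//		// persist or deduplicate md here
//	})
//	// Stop() can be called from another goroutine to end the run early.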

func (c *Collector) Run(ctx context.Context, cfg map[string]any, callback func(plugin.MerchantData)) error {
	c.stopped.Store(false)

	// Apply proxy pool or single proxy
	if pool, ok := cfg["proxy_pool"].(*proxypool.Pool); ok && pool != nil {
		c.proxyPool = pool
		log.Printf("[web_collector] using proxy pool with %d proxies", pool.Size())
	} else if proxyURL, ok := cfg["proxy_url"].(string); ok && proxyURL != "" {
		c.static.SetProxy(proxyURL)
		log.Printf("[web_collector] using proxy: %s", proxyURL)
	}

	if c.serper == nil {
		log.Println("[web_collector] no serper client configured, skipping")
		return nil
	}

	keywords, _ := cfg["keywords"].([]string)
	if len(keywords) == 0 {
		log.Println("[web_collector] no keywords provided")
		return nil
	}

	// Stop conditions
	maxMerchants, _ := cfg["max_merchants"].(int)
	maxDurationMins, _ := cfg["max_duration_mins"].(int)
	resumeSnapshot, _ := cfg["resume_snapshot"].(bool)

	var deadline time.Time
	if maxDurationMins > 0 {
		deadline = time.Now().Add(time.Duration(maxDurationMins) * time.Minute)
		log.Printf("[web_collector] will stop after %d minutes", maxDurationMins)
	}

	// Load or init snapshot
	snapshot := c.loadSnapshot(ctx)
	if !resumeSnapshot {
		snapshot = map[string]bool{}
		log.Println("[web_collector] starting fresh (snapshot cleared)")
	} else if len(snapshot) > 0 {
		log.Printf("[web_collector] resuming from snapshot, %d queries already done", len(snapshot))
	}

	merchantCount := 0
	wrappedCallback := func(md plugin.MerchantData) {
		callback(md)
		merchantCount++
	}

	queries := expandSearchQueries(keywords)
	for _, q := range queries {
		if c.stopped.Load() || ctx.Err() != nil {
			break
		}
		if maxMerchants > 0 && merchantCount >= maxMerchants {
			log.Printf("[web_collector] reached max_merchants limit (%d), stopping", maxMerchants)
			break
		}
		if !deadline.IsZero() && time.Now().After(deadline) {
			log.Printf("[web_collector] reached max_duration limit, stopping")
			break
		}
		if snapshot[q] {
			log.Printf("[web_collector] skipping (snapshot): %s", q)
			continue
		}

		// Rotate proxy for each query if using pool
		c.rotateProxy()

		log.Printf("[web_collector] searching: %s", q)

		// ── Organic search ──
		t0 := time.Now()
		results, err := c.serper.Search(ctx, q)
		if err != nil {
			log.Printf("[web_collector] search error: %v", err)
			c.logger.LogError("search", "", err.Error())
		} else {
			log.Printf("[web_collector] organic search %q: %d results in %dms", q, len(results), time.Since(t0).Milliseconds())
			for i, r := range results {
				c.logger.LogSearchResult(q+" [organic]", i+1, r.Title, r.URL, r.Snippet)
			}
			c.processResults(ctx, results, q, wrappedCallback)
		}

		time.Sleep(1 * time.Second)

		// ── Video search ──
		if c.stopped.Load() || ctx.Err() != nil {
			break
		}
		videoResults, err := c.serper.SearchVideos(ctx, q)
		if err != nil {
			c.logger.LogError("search_videos", "", err.Error())
		} else {
			for i, r := range videoResults {
				c.logger.LogSearchResult(q+" [video]", i+1, r.Title, r.URL, r.Snippet)
			}
			c.processResults(ctx, videoResults, q, wrappedCallback)
		}

		// Mark query as done in snapshot
		snapshot[q] = true
		c.saveSnapshot(ctx, snapshot)

		select {
		case <-ctx.Done():
			return nil
		case <-time.After(2 * time.Second):
		}
	}

	// If all queries done naturally, clear snapshot for next full run
	allDone := true
	for _, q := range queries {
		if !snapshot[q] {
			allDone = false
			break
		}
	}
	if allDone && c.rdb != nil {
		c.rdb.Del(ctx, snapshotKey)
		log.Println("[web_collector] all queries done, snapshot cleared")
	}

	log.Printf("[web_collector] done, collected %d merchants", merchantCount)
	return nil
}
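
// Run recognizes the following cfg keys (taken from the type assertions at the
// top of Run). The values below are only an illustrative sketch:
//
//	cfg := map[string]any{
//		"keywords":          []string{"example keyword"}, // required: Run returns early if empty
//		"proxy_pool":        pool,                        // *proxypool.Pool, optional, rotated per query
//		"proxy_url":         "http://127.0.0.1:8080",     // string, used only when no pool is supplied
//		"max_merchants":     100,                         // int, 0 means unlimited
//		"max_duration_mins": 30,                          // int, 0 means no deadline
//		"resume_snapshot":   true,                        // bool, resume from the Redis snapshot
//	}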

func (c *Collector) loadSnapshot(ctx context.Context) map[string]bool {
	if c.rdb == nil {
		return map[string]bool{}
	}
	data, err := c.rdb.Get(ctx, snapshotKey).Bytes()
	if err != nil {
		return map[string]bool{}
	}
	var snapshot map[string]bool
	if err := json.Unmarshal(data, &snapshot); err != nil {
		return map[string]bool{}
	}
	return snapshot
}

func (c *Collector) saveSnapshot(ctx context.Context, snapshot map[string]bool) {
	if c.rdb == nil {
		return
	}
	data, err := json.Marshal(snapshot)
	if err != nil {
		return
	}
	// Keep snapshot for 7 days
	c.rdb.Set(ctx, snapshotKey, data, 7*24*time.Hour)
}
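
// The snapshot stored under snapshotKey is just the marshalled map[string]bool
// of completed queries, kept for 7 days, e.g. (illustrative keys):
//
//	{"example keyword":true,"example keyword telegram":true}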

// processResults handles search results with full logging at every node.
func (c *Collector) processResults(ctx context.Context, results []search.SearchResult, query string, callback func(plugin.MerchantData)) {
	for _, r := range results {
		if c.stopped.Load() || ctx.Err() != nil {
			break
		}

		// ── Node 1: Extract from snippet text ──
		snippetText := r.Title + " " + r.Snippet
		c.extractFromSnippet(snippetText, r.Title, r.URL, callback)

		// ── Node 2: Extract URLs from snippet → crawl them ──
		snippetURLs := reURL.FindAllString(r.Snippet, -1)
		for _, sURL := range snippetURLs {
			if c.stopped.Load() || ctx.Err() != nil {
				break
			}
			sURL = strings.TrimRight(sURL, ".,;)\"'")
			if strings.Contains(sURL, "t.me/") || strings.Contains(sURL, "telegram.me/") {
				username := extractTGUsername(sURL)
				if username != "" {
					md := plugin.MerchantData{
						TgUsername: username, TgLink: "https://t.me/" + username,
						SourceType: "web", SourceName: r.Title, SourceURL: r.URL,
					}
					c.logger.LogMerchantFound(md, "snippet_tme_url", 0, r.URL)
					callback(md)
				}
				continue
			}
			if isBlacklistDomain(sURL) {
				c.logger.LogSkip("crawl_snippet_url", sURL, "blacklisted_domain")
				continue
			}
			// Crawl URLs found inside snippets — depth=1, parent is the serper result
			c.crawlAndExtract(ctx, sURL, r.URL, 1, r.Title, callback)
		}

		// ── Node 3: Crawl the result URL itself ──
		classification := search.ClassifyURL(r.URL)
		c.logger.LogSkip("classify", r.URL, classification) // log classification decision
		switch classification {
		case "tg_channel":
			username := extractTGUsername(r.URL)
			if username != "" {
				md := plugin.MerchantData{
					TgUsername: username, TgLink: "https://t.me/" + username,
					SourceType: "web", SourceName: r.Title, SourceURL: r.URL,
				}
				c.logger.LogMerchantFound(md, "direct_tme_link", 0, "")
				callback(md)
			}
		case "nav_site", "web_page":
			if crawler.RuleFilter(r.URL) != crawler.FilterDiscard {
				c.crawlAndExtract(ctx, r.URL, "", 0, r.Title, callback)
			} else {
				c.logger.LogSkip("crawl", r.URL, "rule_filter_discard")
			}
		default:
			c.logger.LogSkip("crawl", r.URL, "classification_discard")
		}
	}
}

// extractFromSnippet extracts contacts from snippet/title text and logs everything.
func (c *Collector) extractFromSnippet(text, title, sourceURL string, callback func(plugin.MerchantData)) {
	contacts := extractor.ExtractAll(text)
	var usernames []string
	for _, info := range contacts {
		if info.TgUsername == "" {
			continue
		}
		usernames = append(usernames, info.TgUsername)
		md := plugin.MerchantData{
			TgUsername: info.TgUsername, TgLink: "https://t.me/" + info.TgUsername,
			Website: info.Website, Email: info.Email, Phone: info.Phone,
			SourceType: "web", SourceName: title, SourceURL: sourceURL,
			OriginalText: text,
		}
		c.logger.LogMerchantFound(md, "snippet_regex", 0, "")
		callback(md)
	}
	// Always log snippet extraction — even if empty (for audit: "we looked, nothing found")
	c.logger.LogSnippetExtract(sourceURL, text, usernames)
}

// rotateProxy switches to the next proxy in the pool (if pool mode).
// Returns the proxy URL being used (for health reporting).
func (c *Collector) rotateProxy() string {
	if c.proxyPool == nil {
		return ""
	}
	nextURL := c.proxyPool.Next()
	if nextURL != "" {
		c.static.SetProxy(nextURL)
		log.Printf("[web_collector] rotated to proxy: %s", nextURL)
	}
	return nextURL
}

// reportProxyResult reports success/failure to the proxy pool for a specific proxy.
func (c *Collector) reportProxyResult(proxyURL string, err error) {
	if c.proxyPool == nil || proxyURL == "" {
		return
	}
	if err != nil {
		c.proxyPool.ReportFailure(proxyURL)
	} else {
		c.proxyPool.ReportSuccess(proxyURL)
	}
}

// crawlAndExtract fetches a page, extracts contacts, and follows sub-links.
// depth tracks how deep we are from the original serper result.
// parentURL tracks which page led us here.
func (c *Collector) crawlAndExtract(ctx context.Context, pageURL, parentURL string, depth int, title string, callback func(plugin.MerchantData)) {
	if depth > 2 {
		c.logger.LogSkip("crawl", pageURL, "max_depth_exceeded")
		return
	}

	// Rotate proxy and capture which proxy is being used
	usedProxy := c.rotateProxy()

	// ── Fetch page ──
	t0 := time.Now()
	result := c.static.Crawl(ctx, pageURL)
	c.reportProxyResult(usedProxy, result.Error)
	if result.Error != nil || result.HTML == "" {
		// On failure with pool, try once more with next proxy
		if c.proxyPool != nil && result.Error != nil {
			usedProxy = c.rotateProxy()
			result = c.static.Crawl(ctx, pageURL)
			c.reportProxyResult(usedProxy, result.Error)
		}
		if result.Error != nil || result.HTML == "" {
			result = c.dynamic.Crawl(ctx, pageURL)
		}
	}
	dur := time.Since(t0)
	if result.Error != nil || result.HTML == "" {
		c.logger.LogCrawlPage(pageURL, parentURL, depth, "", nil, 0, result.Error, dur)
		return
	}

	// Content filter
	hasTgLinks := len(result.TgLinks) > 0
	if !hasTgLinks {
		snippet := result.HTML
		if len(snippet) > 5000 {
			snippet = snippet[:5000]
		}
		if !extractor.ContainsChinese(snippet, 0) && !extractor.HasContact(snippet) {
			c.logger.LogCrawlPage(pageURL, parentURL, depth, snippet, nil, len(result.Links), nil, dur)
			c.logger.LogSkip("crawl", pageURL, "no_chinese_no_contact")
			return
		}
	}

	// ── Log crawl with content summary ──
	htmlSummary := result.HTML
	if len(htmlSummary) > 2000 {
		htmlSummary = htmlSummary[:2000]
	}
	c.logger.LogCrawlPage(pageURL, parentURL, depth, htmlSummary, result.TgLinks, len(result.Links), nil, dur)

	// ── Extract from t.me links in <a href> ──
	seenUsernames := map[string]bool{}
	for _, tgLink := range result.TgLinks {
		username := crawler.ExtractTGUsername(tgLink)
		if username == "" || seenUsernames[strings.ToLower(username)] {
			continue
		}
		seenUsernames[strings.ToLower(username)] = true
		md := plugin.MerchantData{
			TgUsername: username, TgLink: "https://t.me/" + username,
			SourceType: "web", SourceName: title, SourceURL: pageURL,
		}
		c.logger.LogMerchantFound(md, "crawl_href", depth, parentURL)
		callback(md)
	}

	// ── Extract from page text ──
	allContacts := extractor.ExtractAll(result.HTML)
	var extractedNames []string
	for _, info := range allContacts {
		if info.TgUsername == "" || seenUsernames[strings.ToLower(info.TgUsername)] {
			continue
		}
		seenUsernames[strings.ToLower(info.TgUsername)] = true
		extractedNames = append(extractedNames, info.TgUsername)
		md := plugin.MerchantData{
			TgUsername: info.TgUsername, TgLink: "https://t.me/" + info.TgUsername,
			Website: info.Website, Email: info.Email, Phone: info.Phone,
			SourceType: "web", SourceName: title, SourceURL: pageURL,
		}
		c.logger.LogMerchantFound(md, "crawl_text", depth, parentURL)
		callback(md)
	}

	// Log page extraction results
	contentSample := result.HTML
	if len(contentSample) > 1000 {
		contentSample = contentSample[:1000]
	}
	c.logger.LogPageExtract(pageURL, parentURL, depth, contentSample, extractedNames)

	// ── Follow sub-links to deeper pages (depth+1) ──
	if depth < 2 {
		subPages := collectSubPages(pageURL, result.Links)
		for _, link := range subPages {
			if c.stopped.Load() || ctx.Err() != nil {
				break
			}
			if strings.Contains(link, "t.me") || strings.Contains(link, "telegram.me") {
				continue
			}
			if crawler.RuleFilter(link) == crawler.FilterDiscard {
				continue
			}
			c.crawlAndExtract(ctx, link, pageURL, depth+1, title, callback)
		}
	}
}
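
// Depth semantics, as used by processResults above: the serper result URL is
// crawled at depth 0, URLs pulled out of snippets start at depth 1, and each
// page's selected sub-links are followed at depth+1, so nothing beyond depth 2
// is ever fetched.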

// collectSubPages picks sub-pages worth crawling from a page's links.
// Prioritizes contact/about/support pages plus same-domain internal links.
func collectSubPages(baseURL string, links []string) []string {
	baseDomain := extractDomain(baseURL)
	if baseDomain == "" {
		return nil
	}

	// Priority paths
	contactPaths := []string{"/contact", "/contact-us", "/about", "/about-us", "/support", "/faq", "/help"}
	var priority, sameDomain []string
	seen := map[string]bool{baseURL: true}
	for _, link := range links {
		if seen[link] {
			continue
		}
		seen[link] = true
		linkDomain := extractDomain(link)
		if linkDomain != baseDomain {
			continue
		}
		lower := strings.ToLower(link)
		isPriority := false
		for _, p := range contactPaths {
			if strings.Contains(lower, p) {
				priority = append(priority, link)
				isPriority = true
				break
			}
		}
		if !isPriority && len(sameDomain) < 5 {
			sameDomain = append(sameDomain, link)
		}
	}

	result := append(priority, sameDomain...)
	if len(result) > 10 {
		result = result[:10]
	}
	return result
}
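
// For example (illustrative URLs): given links to https://example.com/contact-us,
// https://example.com/pricing, https://example.com/blog and several off-site
// URLs from a page on example.com, the off-site links are dropped, /contact-us
// lands in the priority list, /pricing and /blog fill the same-domain list
// (at most 5 such links), and the combined result is capped at 10 links. Note
// that only absolute links sharing the page's hostname pass the extractDomain
// comparison.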

func expandSearchQueries(keywords []string) []string {
	suffixes := []string{
		"",
		" telegram",
		" t.me",
		" 电报",
		" 联系方式 telegram",
	}
	seen := map[string]bool{}
	var queries []string
	for _, kw := range keywords {
		for _, suffix := range suffixes {
			q := kw + suffix
			if !seen[q] {
				seen[q] = true
				queries = append(queries, q)
			}
		}
	}
	return queries
}
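
// Example expansion (the Chinese suffixes mean "telegram" and "contact info
// telegram"): a single keyword "foo" becomes the five queries "foo",
// "foo telegram", "foo t.me", "foo 电报" and "foo 联系方式 telegram"; the seen
// set drops duplicates when keywords overlap.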

func isBlacklistDomain(u string) bool {
	bl := []string{"youtube.com", "google.com", "twitter.com", "facebook.com",
		"instagram.com", "bit.ly", "gstatic.com", "wikipedia.org", "x.com"}
	lower := strings.ToLower(u)
	for _, b := range bl {
		if strings.Contains(lower, b) {
			return true
		}
	}
	return false
}

var reTGUsername = regexp.MustCompile(`(?:t(?:elegram)?\.me)/([a-zA-Z][a-zA-Z0-9_]{4,31})`)
var reURL = regexp.MustCompile(`https?://[^\s<>"'\x{4e00}-\x{9fa5}]+`)

func extractTGUsername(rawURL string) string {
	m := reTGUsername.FindStringSubmatch(rawURL)
	if len(m) > 1 {
		return m[1]
	}
	return ""
}
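
// For example, extractTGUsername("https://t.me/some_merchant?start=1") and
// extractTGUsername("telegram.me/some_merchant") both return "some_merchant"
// (an illustrative username): the pattern accepts t.me or telegram.me paths
// whose username starts with a letter and runs 5 to 32 characters of
// [A-Za-z0-9_]. reURL stops at whitespace, quotes, angle brackets and CJK
// characters, so URLs embedded in Chinese snippet text are cut off cleanly.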

func extractDomain(rawURL string) string {
	u, err := url.Parse(rawURL)
	if err != nil {
		return ""
	}
	return u.Hostname()
}