
searx

My custom branch(es) on searx, a meta-search engine
git clone https://hacktivis.me/git/searx.git

startpage.py (3524B)


# Startpage (Web)
#
# @website https://startpage.com
# @provide-api no (nothing found)
#
# @using-api no
# @results HTML
# @stable no (HTML can change)
# @parse url, title, content
#
# @todo paging

from lxml import html
from dateutil import parser
from datetime import datetime, timedelta
import re
from searx.engines.xpath import extract_text

# engine dependent config
categories = ['general']
# there is a mechanism to block "bot" searches
# (probably the parameter qid), which requires
# storing qid's between multiple search calls
# paging = False
language_support = True

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'do/search'

# specific xpath variables
# ads xpath: //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//li[contains(@class, "search-result") and contains(@class, "search-item")]'
link_xpath = './/h3/a'
content_xpath = './p[@class="search-item__body"]'


# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10

    params['url'] = search_url
    params['method'] = 'POST'
    params['data'] = {'query': query,
                      'startat': offset}

    # set language
    params['data']['with_language'] = ('lang_' + params['language'].split('-')[0])

    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in dom.xpath(results_xpath):
        links = result.xpath(link_xpath)
        if not links:
            continue
        link = links[0]
        url = link.attrib.get('href')

        # block google-ad urls
        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search urls
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if result.xpath(content_xpath):
            content = extract_text(result.xpath(content_xpath))
        else:
            content = ''

        published_date = None

        # check if the search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0:date_pos - 5]
            published_date = parser.parse(date_string, dayfirst=True)

            # strip the date prefix from the content string
            content = content[date_pos:]

        # check if the search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0:date_pos - 5]

            # calculate the datetime from the relative date
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

            # strip the date prefix from the content string
            content = content[date_pos:]

        if published_date:
            # append result with publication date
            results.append({'url': url,
                            'title': title,
                            'content': content,
                            'publishedDate': published_date})
        else:
            # append result
            results.append({'url': url,
                            'title': title,
                            'content': content})

    # return results
    return results
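
As a rough illustration (not part of the file above), the request/response pair could be exercised offline against a saved Startpage results page. This sketch assumes it is run inside a searx checkout so the module imports as searx.engines.startpage; the FakeResponse class and the saved-page filename are hypothetical stand-ins.

from searx.engines import startpage


class FakeResponse:
    # minimal stand-in for the HTTP response object: response() only reads .text
    def __init__(self, text):
        self.text = text


# build the outgoing POST parameters for page 1
params = startpage.request('free software', {'pageno': 1, 'language': 'en-US'})
print(params['url'], params['data'])

# parse a previously saved results page (hypothetical file)
with open('startpage_results.html') as f:
    for r in startpage.response(FakeResponse(f.read())):
        print(r.get('publishedDate'), r['url'], '-', r['title'])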