
searx

My custom branch(es) on searx, a meta-search engine.

git clone https://anongit.hacktivis.me/git/searx.git/

bing.py (2955B)


  1. """
  2. Bing (Web)
  3. @website https://www.bing.com
  4. @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
  5. max. 5000 query/month
  6. @using-api no (because of query limit)
  7. @results HTML (using search portal)
  8. @stable no (HTML can change)
  9. @parse url, title, content
  10. @todo publishedDate
  11. """
  12. from lxml import html
  13. from searx.engines.xpath import extract_text
  14. from searx.url_utils import urlencode
  15. from searx.utils import match_language, gen_useragent
  16. # engine dependent config
  17. categories = ['general']
  18. paging = True
  19. language_support = True
  20. supported_languages_url = 'https://www.bing.com/account/general'
  21. language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}
  22. # search-url
  23. base_url = 'https://www.bing.com/'
  24. search_string = 'search?{query}&first={offset}'
  25. # do search-request
  26. def request(query, params):
  27. offset = (params['pageno'] - 1) * 10 + 1
  28. lang = match_language(params['language'], supported_languages, language_aliases)
  29. query = u'language:{} {}'.format(lang.split('-')[0].upper(), query.decode('utf-8')).encode('utf-8')
  30. search_path = search_string.format(
  31. query=urlencode({'q': query}),
  32. offset=offset)
  33. params['url'] = base_url + search_path
  34. params['headers']['User-Agent'] = gen_useragent('Windows NT 6.3; WOW64')
  35. return params
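
# Example (illustrative, not part of the original file): for query
# b'searx', pageno 2 and language 'en-US', request() sets params['url'] to
#   https://www.bing.com/search?q=language%3AEN+searx&first=11
# and fills in a desktop User-Agent header via gen_useragent().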

# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    try:
        results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
                        .split()[0].replace(',', ''))})
    except:
        pass

    # parse results
    for result in dom.xpath('//div[@class="sa_cc"]'):
        link = result.xpath('.//h3/a')[0]

        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(result.xpath('.//p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # parse results again if nothing is found yet
    for result in dom.xpath('//li[@class="b_algo"]'):
        link = result.xpath('.//h2/a')[0]

        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(result.xpath('.//p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # return results
    return results
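
# Example (illustrative): a result block such as
#   <div class="sa_cc"><h3><a href="https://example.org/">Example</a></h3>
#   <p>Example snippet</p></div>
# parses to {'url': 'https://example.org/', 'title': 'Example',
# 'content': 'Example snippet'}.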

# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = []
    dom = html.fromstring(resp.text)
    options = dom.xpath('//div[@id="limit-languages"]//input')
    for option in options:
        code = option.xpath('./@id')[0].replace('_', '-')
        if code == 'nb':
            code = 'no'
        supported_languages.append(code)
    return supported_languages
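
For context, here is a minimal sketch of how this request/response pair can be driven by hand. The params layout ('pageno', 'language', 'headers') is inferred from request() above; fetching with the requests library and the stubbed supported_languages list are assumptions for illustration, since in searx the framework performs the HTTP request itself and injects the language list at startup.

    # Minimal driver sketch, not part of bing.py; assumes searx is installed.
    import requests
    from searx.engines import bing

    bing.supported_languages = ['en-US', 'de-DE']  # stub; searx normally injects this

    params = {
        'pageno': 1,          # first page of results
        'language': 'en-US',  # user-selected language
        'headers': {},        # request() adds a User-Agent here
    }
    params = bing.request(b'test query', params)  # query arrives as bytes

    resp = requests.get(params['url'], headers=params['headers'])
    for item in bing.response(resp):
        if 'url' in item:  # skip the number_of_results entry
            print(item['url'], '-', item['title'])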