
searx

My custom branch(es) on searx, a meta-search engine

git clone https://hacktivis.me/git/searx.git

bing_images.py (3609B)


  1. """
  2. Bing (Images)
  3. @website https://www.bing.com/images
  4. @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
  5. max. 5000 query/month
  6. @using-api no (because of query limit)
  7. @results HTML (using search portal)
  8. @stable no (HTML can change)
  9. @parse url, title, img_src
  10. @todo currently there are up to 35 images receive per page,
  11. because bing does not parse count=10.
  12. limited response to 10 images
  13. """

from lxml import html
from json import loads
import re
from searx.url_utils import urlencode
from searx.utils import match_language

# engine dependent config
categories = ['images']
paging = True
safesearch = True
time_range_support = True
language_support = True
supported_languages_url = 'https://www.bing.com/account/general'
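# the markets listed on that page are parsed by _fetch_supported_languages()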

# search-url
base_url = 'https://www.bing.com/'
search_string = 'images/search?{query}&count=10&first={offset}'
time_range_string = '&qft=+filterui:age-lt{interval}'
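# time range values are given in minutes (1440 minutes = one day)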
time_range_dict = {'day': '1440',
                   'week': '10080',
                   'month': '43200',
                   'year': '525600'}

# safesearch definitions
safesearch_types = {2: 'STRICT',
                    1: 'DEMOTE',
                    0: 'OFF'}
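
# Bing embeds result metadata in the link's 'm' attribute as JSON with
# unquoted keys; this regex matches those keys so loads() can parse the data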
_quote_keys_regex = re.compile('({|,)([a-z][a-z0-9]*):(")', re.I | re.U)


# do search-request
def request(query, params):
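    # 'first' is the 1-based index of the first result; 10 results per page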
    offset = (params['pageno'] - 1) * 10 + 1

    search_path = search_string.format(
        query=urlencode({'q': query}),
        offset=offset)

    language = match_language(params['language'], supported_languages).lower()
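
    # safesearch is controlled through the SRCHHPGUSR cookie,
    # the market and UI language through the _EDGE_S cookie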
    params['cookies']['SRCHHPGUSR'] = \
        'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')

    params['cookies']['_EDGE_S'] = 'mkt=' + language +\
        '&ui=' + language + '&F=1'

    params['url'] = base_url + search_path

    if params['time_range'] in time_range_dict:
        params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])

    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in dom.xpath('//div[@id="mmComponent_images_1"]/ul/li/div/div[@class="imgpt"]'):
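        # each result wraps a single link whose 'm' attribute holds the metadata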
        link = result.xpath('./a')[0]

        # TODO find actual title
        title = link.xpath('.//img/@alt')[0]

        # parse json-data (it is required to add a space, to make it parsable)
        json_data = loads(_quote_keys_regex.sub(r'\1"\2": \3', link.attrib.get('m')))
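
        # purl: result page url, murl: full-size image url, turl: thumbnail url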
        url = json_data.get('purl')
        img_src = json_data.get('murl')
        thumbnail = json_data.get('turl')

        # append result
        results.append({'template': 'images.html',
                        'url': url,
                        'title': title,
                        'content': '',
                        'thumbnail_src': thumbnail,
                        'img_src': img_src})

        # TODO stop parsing if 10 images are found
        # if len(results) >= 10:
        #     break

    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = []
    dom = html.fromstring(resp.text)

    regions_xpath = '//div[@id="region-section-content"]' \
                    + '//ul[@class="b_vList"]/li/a/@href'

    regions = dom.xpath(regions_xpath)
    for region in regions:
        code = re.search(r'setmkt=[^&]+', region).group()[7:]
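        # normalise Bing's nb-NO (Norwegian Bokmål) to no-NO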
        if code == 'nb-NO':
            code = 'no-NO'

        supported_languages.append(code)

    return supported_languages