
searx

My custom branch(es) on searx, a meta-search engine. git clone https://hacktivis.me/git/searx.git

qwant.py (4367B)


  1. """
  2. Qwant (Web, Images, News, Social)
  3. @website https://qwant.com/
  4. @provide-api not officially (https://api.qwant.com/api/search/)
  5. @using-api yes
  6. @results JSON
  7. @stable yes
  8. @parse url, title, content
  9. """
  10. from datetime import datetime
  11. from json import loads
  12. from searx.utils import html_to_text
  13. from searx.url_utils import urlencode
  14. from searx.utils import match_language
  15. # engine dependent config
  16. categories = None
  17. paging = True
  18. language_support = True
  19. supported_languages_url = 'https://qwant.com/region'
  20. category_to_keyword = {'general': 'web',
  21. 'images': 'images',
  22. 'news': 'news',
  23. 'social media': 'social'}
  24. # search-url
  25. url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}&t={keyword}&uiv=4'
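
# Illustration (an assumption for clarity, not upstream code): for a
# first-page web search the template above expands as shown below; note
# that {keyword} is substituted twice and the query string is passed
# through urlencode() first.
if __name__ == '__main__':
    print(url.format(keyword='web', offset=0, query=urlencode({'q': 'privacy'})))
    # -> https://api.qwant.com/api/search/web?count=10&offset=0&f=&q=privacy&t=web&uiv=4
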
# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10

    # categories is filled in by searx from the engine settings; fall back
    # to the generic web endpoint when the category has no Qwant keyword
    if categories[0] and categories[0] in category_to_keyword:
        params['url'] = url.format(keyword=category_to_keyword[categories[0]],
                                   query=urlencode({'q': query}),
                                   offset=offset)
    else:
        params['url'] = url.format(keyword='web',
                                   query=urlencode({'q': query}),
                                   offset=offset)

    # add language tag
    language = match_language(params['language'], supported_languages)
    params['url'] += '&locale=' + language.replace('-', '_').lower()

    return params
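
# A hedged usage sketch, not part of the upstream engine: how searx would
# drive request() for a first-page general query. 'categories' and
# 'supported_languages' are normally injected by searx's engine loader, so
# stub values are assumed here; run inside a searx checkout.
if __name__ == '__main__':
    categories = ['general']
    supported_languages = ['en-US']
    demo = request('privacy', {'pageno': 1, 'language': 'en-US'})
    print(demo['url'])
    # expected, assuming match_language picks 'en-US' from the stub list:
    # https://api.qwant.com/api/search/web?count=10&offset=0&f=&q=privacy&t=web&uiv=4&locale=en_us
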
# get response from search-request
def response(resp):
    results = []

    search_results = loads(resp.text)

    # return empty array if there are no results
    if 'data' not in search_results:
        return []

    data = search_results.get('data', {})
    res = data.get('result', {})

    # parse results
    for result in res.get('items', {}):
        title = html_to_text(result['title'])
        res_url = result['url']
        content = html_to_text(result['desc'])

        if category_to_keyword.get(categories[0], '') == 'web':
            results.append({'title': title,
                            'content': content,
                            'url': res_url})

        elif category_to_keyword.get(categories[0], '') == 'images':
            thumbnail_src = result['thumbnail']
            img_src = result['media']
            results.append({'template': 'images.html',
                            'url': res_url,
                            'title': title,
                            'content': '',
                            'thumbnail_src': thumbnail_src,
                            'img_src': img_src})

        elif category_to_keyword.get(categories[0], '') == 'social':
            published_date = datetime.fromtimestamp(result['date'], None)
            img_src = result.get('img', None)
            results.append({'url': res_url,
                            'title': title,
                            'publishedDate': published_date,
                            'content': content,
                            'img_src': img_src})

        elif category_to_keyword.get(categories[0], '') == 'news':
            published_date = datetime.fromtimestamp(result['date'], None)
            media = result.get('media', [])
            if len(media) > 0:
                img_src = media[0].get('pict', {}).get('url', None)
            else:
                img_src = None
            results.append({'url': res_url,
                            'title': title,
                            'publishedDate': published_date,
                            'content': content,
                            'img_src': img_src})

    return results
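
# A hedged sketch (not upstream code): response() only needs an object with
# a .text attribute, so a handcrafted payload is enough to show the parsing
# path for the 'general' category; the field names follow the code above.
if __name__ == '__main__':
    class _FakeResp(object):
        text = ('{"data": {"result": {"items": ['
                '{"title": "<b>searx</b>", "url": "https://example.org",'
                ' "desc": "a meta-search engine"}]}}}')
    categories = ['general']
    print(response(_FakeResp()))
    # -> [{'title': 'searx', 'content': 'a meta-search engine', 'url': 'https://example.org'}]
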
# get supported languages from their site
def _fetch_supported_languages(resp):
    # list of regions is embedded in page as a js object
    response_text = resp.text
    response_text = response_text[response_text.find('regionalisation'):]
    response_text = response_text[response_text.find('{'):response_text.find(');')]

    regions_json = loads(response_text)

    supported_languages = []
    for lang in regions_json['languages'].values():
        # Qwant reports Norwegian Bokmål as 'nb'; searx uses the generic 'no'
        if lang['code'] == 'nb':
            lang['code'] = 'no'
        for country in lang['countries']:
            supported_languages.append(lang['code'] + '-' + country)

    return supported_languages
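
# A hedged sketch (synthetic input, not Qwant's real page): the scraper above
# only needs resp.text to contain the 'regionalisation' JS object, so a
# minimal fragment mimicking that shape is assumed here to exercise the
# string slicing and the language/country expansion.
if __name__ == '__main__':
    class _FakeRegionPage(object):
        text = ('var regionalisation = ({"languages": {"english": '
                '{"code": "en", "countries": ["US", "GB"]}}});')
    print(_fetch_supported_languages(_FakeRegionPage()))
    # -> ['en-US', 'en-GB']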