
searx

My custom branch(es) on searx, a meta-search engine. git clone https://hacktivis.me/git/searx.git

archlinux.py (3980B)


# -*- coding: utf-8 -*-
"""
 Arch Linux Wiki

 @website      https://wiki.archlinux.org
 @provide-api  no (MediaWiki provides an API, but the Arch Wiki blocks access to it)
 @using-api    no
 @results      HTML
 @stable       no (HTML can change)
 @parse        url, title
"""
from lxml import html
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode, urljoin

# engine dependent config
categories = ['it']
language_support = True
paging = True
base_url = 'https://wiki.archlinux.org'

# xpath queries
xpath_results = '//ul[@class="mw-search-results"]/li'
xpath_link = './/div[@class="mw-search-result-heading"]/a'


# cut 'en' from 'en-US', 'de' from 'de-CH', and so on
def locale_to_lang_code(locale):
    if locale.find('-') >= 0:
        locale = locale.split('-')[0]
    return locale
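

# for example, locale_to_lang_code('en-US') returns 'en' and
# locale_to_lang_code('de-CH') returns 'de'; plain codes pass through unchanged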
# wikis for some languages were moved off from the main site, we need to make
# requests to correct URLs to be able to get results in those languages
lang_urls = {
    'en': {
        'base': 'https://wiki.archlinux.org',
        'search': '/index.php?title=Special:Search&offset={offset}&{query}'
    },
    'de': {
        'base': 'https://wiki.archlinux.de',
        'search': '/index.php?title=Spezial:Suche&offset={offset}&{query}'
    },
    'fr': {
        'base': 'https://wiki.archlinux.fr',
        'search': '/index.php?title=Spécial:Recherche&offset={offset}&{query}'
    },
    'ja': {
        'base': 'https://wiki.archlinuxjp.org',
        'search': '/index.php?title=特別:検索&offset={offset}&{query}'
    },
    'ro': {
        'base': 'http://wiki.archlinux.ro',
        'search': '/index.php?title=Special:Căutare&offset={offset}&{query}'
    },
    'tr': {
        'base': 'http://archtr.org/wiki',
        'search': '/index.php?title=Özel:Ara&offset={offset}&{query}'
    }
}


# get base & search URLs for selected language
def get_lang_urls(language):
    if language in lang_urls:
        return lang_urls[language]
    return lang_urls['en']
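

# for example, get_lang_urls('de') returns the wiki.archlinux.de entry,
# while an unlisted code such as 'es' falls back to the English URLs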
# Language names to build search requests for
# those languages which are hosted on the main site.
main_langs = {
    'ar': 'العربية',
    'bg': 'Български',
    'cs': 'Česky',
    'da': 'Dansk',
    'el': 'Ελληνικά',
    'es': 'Español',
    'he': 'עברית',
    'hr': 'Hrvatski',
    'hu': 'Magyar',
    'it': 'Italiano',
    'ko': '한국어',
    'lt': 'Lietuviškai',
    'nl': 'Nederlands',
    'pl': 'Polski',
    'pt': 'Português',
    'ru': 'Русский',
    'sl': 'Slovenský',
    'th': 'ไทย',
    'uk': 'Українська',
    'zh': '简体中文'
}
supported_languages = dict(lang_urls, **main_langs)
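# supported_languages therefore contains both kinds of codes, e.g. 'de'
# (separate wiki) and 'es' (hosted on the main site)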


# do search-request
def request(query, params):
    # translate the locale (e.g. 'en-US') to language code ('en')
    language = locale_to_lang_code(params['language'])

    # if our language is hosted on the main site, we need to add its name
    # to the query in order to narrow the results to that language
    # (encode the name so the bytes concatenation also works on Python 3)
    if language in main_langs:
        query += b' (' + main_langs[language].encode('utf-8') + b')'

    # prepare the request parameters
    query = urlencode({'search': query})
    offset = (params['pageno'] - 1) * 20

    # get request URLs for our language of choice
    urls = get_lang_urls(language)
    search_url = urls['base'] + urls['search']

    params['url'] = search_url.format(query=query, offset=offset)

    return params
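

# for example, query 'pacman' with language 'de-CH' and pageno 1 produces
# params['url'] == 'https://wiki.archlinux.de/index.php?title=Spezial:Suche&offset=0&search=pacman'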
# get response from search-request
def response(resp):
    # get the base URL for the language in which request was made
    language = locale_to_lang_code(resp.search_params['language'])
    base_url = get_lang_urls(language)['base']

    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in dom.xpath(xpath_results):
        link = result.xpath(xpath_link)[0]
        href = urljoin(base_url, link.attrib.get('href'))
        title = extract_text(link)

        results.append({'url': href,
                        'title': title})

    return results
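
For reference, here is a minimal sketch of how searx exercises this engine: it fills in the outgoing request parameters via request(), performs the HTTP fetch itself, and hands the response object to response(). The FakeResponse class and the canned HTML below are illustrative stand-ins, not part of searx (response() only touches .text and .search_params); it assumes the searx package is importable.

from searx.engines import archlinux

# build the outgoing request parameters as searx would
params = archlinux.request(b'pacman', {'language': 'en-US', 'pageno': 1})
print(params['url'])
# -> https://wiki.archlinux.org/index.php?title=Special:Search&offset=0&search=pacman

# hypothetical stand-in for the HTTP response object searx normally passes in
class FakeResponse:
    search_params = {'language': 'en-US'}
    text = ('<ul class="mw-search-results"><li>'
            '<div class="mw-search-result-heading">'
            '<a href="/index.php/Pacman">Pacman</a></div></li></ul>')

for result in archlinux.response(FakeResponse()):
    print(result['title'], result['url'])
# -> Pacman https://wiki.archlinux.org/index.php/Pacman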