
searx

My custom branch(es) on searx, a meta-search engine

git clone https://hacktivis.me/git/searx.git

google.py (13565B)


# Google (Web)
#
# @website     https://www.google.com
# @provide-api yes (https://developers.google.com/custom-search/)
#
# @using-api   no
# @results     HTML
# @stable      no (HTML can change)
# @parse       url, title, content, suggestion

import re
from flask_babel import gettext
from lxml import html, etree
from searx.engines.xpath import extract_text, extract_url
from searx import logger
from searx.url_utils import urlencode, urlparse, parse_qsl
from searx.utils import match_language

logger = logger.getChild('google engine')

# engine dependent config
categories = ['general']
paging = True
language_support = True
use_locale_domain = True
time_range_support = True

# based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
default_hostname = 'www.google.com'

country_to_hostname = {
    'BG': 'www.google.bg',  # Bulgaria
    'CZ': 'www.google.cz',  # Czech Republic
    'DE': 'www.google.de',  # Germany
    'DK': 'www.google.dk',  # Denmark
    'AT': 'www.google.at',  # Austria
    'CH': 'www.google.ch',  # Switzerland
    'GR': 'www.google.gr',  # Greece
    'AU': 'www.google.com.au',  # Australia
    'CA': 'www.google.ca',  # Canada
    'GB': 'www.google.co.uk',  # United Kingdom
    'ID': 'www.google.co.id',  # Indonesia
    'IE': 'www.google.ie',  # Ireland
    'IN': 'www.google.co.in',  # India
    'MY': 'www.google.com.my',  # Malaysia
    'NZ': 'www.google.co.nz',  # New Zealand
    'PH': 'www.google.com.ph',  # Philippines
    'SG': 'www.google.com.sg',  # Singapore
    # 'US': 'www.google.us',  # United States, redirect to .com
    'ZA': 'www.google.co.za',  # South Africa
    'AR': 'www.google.com.ar',  # Argentina
    'CL': 'www.google.cl',  # Chile
    'ES': 'www.google.es',  # Spain
    'MX': 'www.google.com.mx',  # Mexico
    'EE': 'www.google.ee',  # Estonia
    'FI': 'www.google.fi',  # Finland
    'BE': 'www.google.be',  # Belgium
    'FR': 'www.google.fr',  # France
    'IL': 'www.google.co.il',  # Israel
    'HR': 'www.google.hr',  # Croatia
    'HU': 'www.google.hu',  # Hungary
    'IT': 'www.google.it',  # Italy
    'JP': 'www.google.co.jp',  # Japan
    'KR': 'www.google.co.kr',  # South Korea
    'LT': 'www.google.lt',  # Lithuania
    'LV': 'www.google.lv',  # Latvia
    'NO': 'www.google.no',  # Norway
    'NL': 'www.google.nl',  # Netherlands
    'PL': 'www.google.pl',  # Poland
    'BR': 'www.google.com.br',  # Brazil
    'PT': 'www.google.pt',  # Portugal
    'RO': 'www.google.ro',  # Romania
    'RU': 'www.google.ru',  # Russia
    'SK': 'www.google.sk',  # Slovakia
    'SI': 'www.google.si',  # Slovenia
    'SE': 'www.google.se',  # Sweden
    'TH': 'www.google.co.th',  # Thailand
    'TR': 'www.google.com.tr',  # Turkey
    'UA': 'www.google.com.ua',  # Ukraine
    # 'CN': 'www.google.cn',  # China, only from China ?
    'HK': 'www.google.com.hk',  # Hong Kong
    'TW': 'www.google.com.tw'  # Taiwan
}
# osm
url_map = 'https://www.openstreetmap.org/' \
    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'

# search-url
search_path = '/search'
search_url = ('https://{hostname}' +
              search_path +
              '?{query}&start={offset}&gws_rd=cr&gbv=1&lr={lang}&hl={lang_short}&ei=x')

time_range_search = "&tbs=qdr:{range}"
time_range_dict = {'day': 'd',
                   'week': 'w',
                   'month': 'm',
                   'year': 'y'}
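
# Illustration (not part of upstream): with time_range 'week', request() below
# appends '&tbs=qdr:w' to the search URL; tbs=qdr:<d|w|m|y> is Google's query
# parameter for restricting results by age.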

# other URLs
map_hostname_start = 'maps.google.'
maps_path = '/maps'
redirect_path = '/url'
images_path = '/images'
supported_languages_url = 'https://www.google.com/preferences?#languages'

# specific xpath variables
results_xpath = '//div[@class="g"]'
url_xpath = './/h3/a/@href'
title_xpath = './/h3'
content_xpath = './/span[@class="st"]'
content_misc_xpath = './/div[@class="f slp"]'
suggestion_xpath = '//p[@class="_Bmc"]'
spelling_suggestion_xpath = '//a[@class="spell"]'

# map : detail location
map_address_xpath = './/div[@class="s"]//table//td[2]/span/text()'
map_phone_xpath = './/div[@class="s"]//table//td[2]/span/span'
map_website_url_xpath = 'h3[2]/a/@href'
map_website_title_xpath = 'h3[2]'

# map : near the location
map_near = 'table[@class="ts"]//tr'
map_near_title = './/h4'
map_near_url = './/h4/a/@href'
map_near_phone = './/span[@class="nobr"]'

# images
images_xpath = './/div/a'
image_url_xpath = './@href'
image_img_src_xpath = './img/@src'

# property names
# FIXME : no translation
property_address = "Address"
property_phone = "Phone number"

# remove google-specific tracking-url
def parse_url(url_string, google_hostname):
    # sanity check
    if url_string is None:
        return url_string

    # normal case
    parsed_url = urlparse(url_string)
    if (parsed_url.netloc in [google_hostname, '']
            and parsed_url.path == redirect_path):
        query = dict(parse_qsl(parsed_url.query))
        return query['q']
    else:
        return url_string
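
# A worked example (the redirect-URL shape is an assumption for illustration):
#
#   parse_url('https://www.google.com/url?q=https://example.org/page', 'www.google.com')
#   # -> 'https://example.org/page'  (unwrapped from the /url redirect)
#   parse_url('https://example.org/page', 'www.google.com')
#   # -> 'https://example.org/page'  (non-redirect URLs pass through unchanged)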

# returns extract_text on the first result selected by the xpath or None
def extract_text_from_dom(result, xpath):
    r = result.xpath(xpath)
    if len(r) > 0:
        return extract_text(r[0])
    return None

# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10

    language = match_language(params['language'], supported_languages)
    language_array = language.split('-')
    if params['language'].find('-') > 0:
        country = params['language'].split('-')[1]
    elif len(language_array) == 2:
        country = language_array[1]
    else:
        country = 'US'

    url_lang = 'lang_' + language

    if use_locale_domain:
        google_hostname = country_to_hostname.get(country.upper(), default_hostname)
    else:
        google_hostname = default_hostname

    # original format: ID=3e2b6616cee08557:TM=5556667580:C=r:IP=4.1.12.5-:S=23ASdf0soFgF2d34dfgf-_22JJOmHdfgg
    params['cookies']['GOOGLE_ABUSE_EXEMPTION'] = 'x'

    params['url'] = search_url.format(offset=offset,
                                      query=urlencode({'q': query}),
                                      hostname=google_hostname,
                                      lang=url_lang,
                                      lang_short=language)
    if params['time_range'] in time_range_dict:
        params['url'] += time_range_search.format(range=time_range_dict[params['time_range']])

    params['headers']['Accept-Language'] = language + ',' + language + '-' + country
    params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'

    params['google_hostname'] = google_hostname

    return params
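
# Example of the request build (hypothetical values, for illustration only):
# with params['language'] = 'de-AT' and params['pageno'] = 2, country is 'AT',
# so the query is sent to www.google.at with start=10; the exact lr/hl values
# depend on what match_language() resolves 'de-AT' to against supported_languages.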

# get response from search-request
def response(resp):
    results = []

    # detect google sorry
    resp_url = urlparse(resp.url)
    if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
        raise RuntimeWarning('sorry.google.com')

    if resp_url.path.startswith('/sorry'):
        raise RuntimeWarning(gettext('CAPTCHA required'))

    # which hostname ?
    google_hostname = resp.search_params.get('google_hostname')
    google_url = "https://" + google_hostname

    # convert the text to dom
    dom = html.fromstring(resp.text)

    instant_answer = dom.xpath('//div[@id="_vBb"]//text()')
    if instant_answer:
        results.append({'answer': u' '.join(instant_answer)})
    try:
        results_num = int(dom.xpath('//div[@id="resultStats"]//text()')[0]
                          .split()[1].replace(',', ''))
        results.append({'number_of_results': results_num})
    except:
        pass

    # parse results
    for result in dom.xpath(results_xpath):
        try:
            title = extract_text(result.xpath(title_xpath)[0])
            url = parse_url(extract_url(result.xpath(url_xpath), google_url), google_hostname)
            parsed_url = urlparse(url, google_hostname)

            # map result
            if parsed_url.netloc == google_hostname:
                # TODO fix inside links
                continue
                # if parsed_url.path.startswith(maps_path) or parsed_url.netloc.startswith(map_hostname_start):
                #     print "yooooo"*30
                #     x = result.xpath(map_near)
                #     if len(x) > 0:
                #         # map : near the location
                #         results = results + parse_map_near(parsed_url, x, google_hostname)
                #     else:
                #         # map : detail about a location
                #         results = results + parse_map_detail(parsed_url, result, google_hostname)
                # # google news
                # elif parsed_url.path == search_path:
                #     # skipping news results
                #     pass
                # # images result
                # elif parsed_url.path == images_path:
                #     # only thumbnail image provided,
                #     # so skipping image results
                #     # results = results + parse_images(result, google_hostname)
                #     pass
            else:
                # normal result
                content = extract_text_from_dom(result, content_xpath)
                if content is None:
                    continue
                content_misc = extract_text_from_dom(result, content_misc_xpath)
                if content_misc is not None:
                    content = content_misc + "<br />" + content
                # append result
                results.append({'url': url,
                                'title': title,
                                'content': content
                                })
        except:
            logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
            continue

    # parse suggestion
    for suggestion in dom.xpath(suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    for correction in dom.xpath(spelling_suggestion_xpath):
        results.append({'correction': extract_text(correction)})

    # return results
    return results

def parse_images(result, google_hostname):
    results = []
    for image in result.xpath(images_xpath):
        url = parse_url(extract_text(image.xpath(image_url_xpath)[0]), google_hostname)
        img_src = extract_text(image.xpath(image_img_src_xpath)[0])

        # append result
        results.append({'url': url,
                        'title': '',
                        'content': '',
                        'img_src': img_src,
                        'template': 'images.html'
                        })
    return results

def parse_map_near(parsed_url, x, google_hostname):
    results = []

    for result in x:
        title = extract_text_from_dom(result, map_near_title)
        url = parse_url(extract_text_from_dom(result, map_near_url), google_hostname)
        attributes = []
        phone = extract_text_from_dom(result, map_near_phone)
        add_attributes(attributes, property_phone, phone, 'tel:' + phone)
        results.append({'title': title,
                        'url': url,
                        'content': attributes_to_html(attributes)
                        })

    return results

def parse_map_detail(parsed_url, result, google_hostname):
    results = []

    # try to parse the geoloc
    m = re.search(r'@([0-9\.]+),([0-9\.]+),([0-9]+)', parsed_url.path)
    if m is None:
        m = re.search(r'll\=([0-9\.]+),([0-9\.]+)\&z\=([0-9]+)', parsed_url.query)

    if m is not None:
        # geoloc found (ignored)
        lon = float(m.group(2))  # noqa
        lat = float(m.group(1))  # noqa
        zoom = int(m.group(3))  # noqa

        # attributes
        attributes = []
        address = extract_text_from_dom(result, map_address_xpath)
        phone = extract_text_from_dom(result, map_phone_xpath)
        add_attributes(attributes, property_address, address, 'geo:' + str(lat) + ',' + str(lon))
        add_attributes(attributes, property_phone, phone, 'tel:' + phone)

        # title / content / url
        website_title = extract_text_from_dom(result, map_website_title_xpath)
        content = extract_text_from_dom(result, content_xpath)
        website_url = parse_url(extract_text_from_dom(result, map_website_url_xpath), google_hostname)

        # add a result if there is a website
        if website_url is not None:
            results.append({'title': website_title,
                            'content': (content + '<br />' if content is not None else '')
                            + attributes_to_html(attributes),
                            'url': website_url
                            })

    return results

def add_attributes(attributes, name, value, url):
    if value is not None and len(value) > 0:
        attributes.append({'label': name, 'value': value, 'url': url})

def attributes_to_html(attributes):
    retval = '<table class="table table-striped">'
    for a in attributes:
        value = a.get('value')
        if 'url' in a:
            value = '<a href="' + a.get('url') + '">' + value + '</a>'
        retval = retval + '<tr><th>' + a.get('label') + '</th><td>' + value + '</td></tr>'
    retval = retval + '</table>'
    return retval
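
# Example output (illustrative values only):
#
#   attributes_to_html([{'label': 'Phone number', 'value': '555-0100',
#                        'url': 'tel:555-0100'}])
#   # -> '<table class="table table-striped"><tr><th>Phone number</th>'
#   #    '<td><a href="tel:555-0100">555-0100</a></td></tr></table>'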

# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = {}
    dom = html.fromstring(resp.text)
    options = dom.xpath('//table//td/font/label/span')
    for option in options:
        code = option.xpath('./@id')[0][1:]
        name = option.text.title()
        supported_languages[code] = {"name": name}

    return supported_languages
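
# Note: the searx framework calls _fetch_supported_languages() with the HTTP
# response of supported_languages_url when refreshing engine language lists;
# that plumbing lives outside this file (stated here as a hint, not verified
# against this branch).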