
searx

My custom branch(es) on searx, a meta-search engine
git clone https://hacktivis.me/git/searx.git

ina.py (2671B)


# INA (Videos)
#
# @website     https://www.ina.fr/
# @provide-api no
#
# @using-api   no
# @results     HTML (using search portal)
# @stable      no (HTML can change)
# @parse       url, title, content, publishedDate, thumbnail
#
# @todo        set content-parameter with correct data
# @todo        embedded (needs some md5 from video page)

from json import loads
from lxml import html
from dateutil import parser
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode

try:
    from HTMLParser import HTMLParser  # Python 2
except ImportError:
    from html.parser import HTMLParser  # Python 3

# engine dependent config
categories = ['videos']
paging = True
page_size = 48

# search-url
base_url = 'https://www.ina.fr'
search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}'
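# in the template above, hf looks like the page size and b the result offset;
# both are filled in by request() below (inferred from this code, not from any
# documented INA API)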

# specific xpath variables
results_xpath = '//div[contains(@class,"search-results--list")]/div[@class="media"]'
url_xpath = './/a/@href'
title_xpath = './/h3[@class="h3--title media-heading"]'
thumbnail_xpath = './/img/@src'
publishedDate_xpath = './/span[@class="broadcast"]'
content_xpath = './/p[@class="media-body__summary"]'
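# results_xpath selects one node per hit; the remaining expressions are
# relative (.//) and are evaluated against each of those nodes in response()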


# do search-request
def request(query, params):
    params['url'] = search_url.format(ps=page_size,
                                      start=params['pageno'] * page_size,
                                      query=urlencode({'q': query}))

    return params
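
# for example, a query for 'jazz' on page 1 yields:
# https://www.ina.fr/layout/set/ajax/recherche/result?autopromote=&hf=48&b=48&type=Video&r=&q=jazz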


# get response from search-request
def response(resp):
    results = []

    # we get html in a JSON container...
    response = loads(resp.text)

    if "content" not in response:
        return []

    dom = html.fromstring(response["content"])

    p = HTMLParser()

    # parse results
    for result in dom.xpath(results_xpath):
        videoid = result.xpath(url_xpath)[0]
        url = base_url + videoid
        title = p.unescape(extract_text(result.xpath(title_xpath)))
        thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
        if thumbnail[0] == '/':
            thumbnail = base_url + thumbnail
        d = extract_text(result.xpath(publishedDate_xpath)[0])
        d = d.split('/')
        # force ISO date to avoid wrong parsing
        d = "%s-%s-%s" % (d[2], d[1], d[0])
        publishedDate = parser.parse(d)
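        # e.g. a broadcast date shown as '14/07/2016' is rewritten to
        # '2016-07-14' above before dateutil parses it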
        content = extract_text(result.xpath(content_xpath))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content,
                        'template': 'videos.html',
                        'publishedDate': publishedDate,
                        'thumbnail': thumbnail})

    # return results
    return results
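
A quick way to exercise response() without a running searx instance is to hand it a stub object with a text attribute, since that is the only attribute the function reads. This is a minimal sketch, assuming a searx checkout on the import path; FakeResp is a made-up stand-in for the HTTP response object searx normally passes in.

import json
from searx.engines import ina

class FakeResp:
    # response() only reads .text, so a plain attribute is enough
    text = json.dumps({"content": '<div class="search-results--list"></div>'})

print(ina.response(FakeResp()))  # -> [] (the stub markup contains no "media" blocks)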