logo

searx

My custom branch(es) of searx, a meta-search engine
commit: 5c9f6d51746796ef63cd08d27c31e931272e083f
parent: 556b9dd2b0a17528a1c6f2f581e7a0173bc7d4cf
Author: Adam Tauber <asciimoo@gmail.com>
Date:   Sun,  5 Jan 2014 05:49:39 -0800

Merge pull request #16 from dalf/master

bug fixes

Diffstat:

 M engines.cfg_sample                      | 16 +++++++++-------
 M searx/engines/bing.py                   |  4 ++--
 M searx/engines/dailymotion.py            | 12 +++++++++---
 M searx/engines/duckduckgo.py             |  5 +++--
 M searx/engines/duckduckgo_definitions.py |  2 +-
 M searx/engines/flickr.py                 |  0
 M searx/engines/google_images.py          |  0
 M searx/engines/startpage.py              | 11 +++++------
 M searx/engines/xpath.py                  | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------
9 files changed, 85 insertions(+), 41 deletions(-)

diff --git a/engines.cfg_sample b/engines.cfg_sample
@@ -5,7 +5,7 @@ number_of_results = 1
 
 [bing]
 engine = bing
-language = en-us
+locale = en-US
 
 [cc]
 engine=currency_convert
@@ -20,6 +20,7 @@ engine = duckduckgo_definitions
 
 [duckduckgo]
 engine = duckduckgo
+locale = en-us
 
 [flickr]
 engine = flickr
@@ -63,17 +64,17 @@ categories = social media
 [urbandictionary]
 engine = xpath
 search_url = http://www.urbandictionary.com/define.php?term={query}
-url_xpath = //div[@id="entries"]//div[@class="word"]//a
-title_xpath = //div[@id="entries"]//div[@class="word"]//span//text()
-content_xpath = //div[@id="entries"]//div[@class="text"]//div[@class="definition"]//text()
+url_xpath = //div[@id="entries"]//div[@class="word"]/a/@href
+title_xpath = //div[@id="entries"]//div[@class="word"]/span
+content_xpath = //div[@id="entries"]//div[@class="text"]/div[@class="definition"]
 
 [yahoo]
 engine = xpath
 search_url = http://search.yahoo.com/search?p={query}
 results_xpath = //div[@class="res"]
-url_xpath = .//span[@class="url"]//text()
-content_xpath = .//div[@class="abstr"]//text()
-title_xpath = .//h3/a//text()
+url_xpath = .//h3/a/@href
+title_xpath = .//h3/a
+content_xpath = .//div[@class="abstr"]
 suggestion_xpath = //div[@id="satat"]//a
 
 [youtube]
@@ -82,5 +83,6 @@ categories = videos
 
 [dailymotion]
 engine = dailymotion
+locale = en_US
 categories = videos
 
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
@@ -4,11 +4,11 @@ from cgi import escape
 
 base_url = 'http://www.bing.com/'
 search_string = 'search?{query}'
-language = 'en-us' # see http://msdn.microsoft.com/en-us/library/dd251064.aspx
+locale = 'en-US' # see http://msdn.microsoft.com/en-us/library/dd251064.aspx
 
 def request(query, params):
-    search_path = search_string.format(query=urlencode({'q': query, 'setmkt': language}))
+    search_path = search_string.format(query=urlencode({'q': query, 'setmkt': locale}))
     #if params['category'] == 'images':
     #    params['url'] = base_url + 'images/' + search_path
     params['url'] = base_url + search_path
diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py
@@ -1,16 +1,17 @@
 from urllib import urlencode
+from lxml import html
 from json import loads
 from cgi import escape
 
 categories = ['videos']
 
-localization = 'en'
+locale = 'en_US'
 
 # see http://www.dailymotion.com/doc/api/obj-video.html
 search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=25&page=1&{query}'
 
 def request(query, params):
     global search_url
-    params['url'] = search_url.format(query=urlencode({'search': query, 'localization': localization }))
+    params['url'] = search_url.format(query=urlencode({'search': query, 'localization': locale }))
     return params
 
@@ -27,6 +28,11 @@ def response(resp):
         else:
             content = ''
         if res['description']:
-            content += escape(res['description'][:500])
+            description = text_content_from_html(res['description'])
+            content += description[:500]
         results.append({'url': url, 'title': title, 'content': content})
     return results
+
+def text_content_from_html(html_string):
+    desc_html = html.fragment_fromstring(html_string, create_parent=True)
+    return desc_html.text_content()
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
@@ -3,10 +3,11 @@ from urllib import urlencode
 from searx.utils import html_to_text
 
 url = 'https://duckduckgo.com/'
-search_url = url + 'd.js?{query}&l=us-en&p=1&s=0'
+search_url = url + 'd.js?{query}&p=1&s=0'
+locale = 'us-en'
 
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'q': query}))
+    params['url'] = search_url.format(query=urlencode({'q': query, 'l': locale}))
     return params
 
diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
@@ -1,7 +1,7 @@
 import json
 from urllib import urlencode
 
-url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0'
+url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1'
 
 def request(query, params):
     params['url'] = url.format(query=urlencode({'q': query}))
diff --git a/searx/engines/flickr.py b/searx/engines/flickr.py
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
@@ -19,14 +19,13 @@ def response(resp):
     global base_url
     results = []
     dom = html.fromstring(resp.content)
-    for result in dom.xpath('//div[@class="result"]'):
+    # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
+    # not ads : div[@class="result"] are the direct childs of div[@id="results"]
+    for result in dom.xpath('//div[@id="results"]/div[@class="result"]'):
         link = result.xpath('.//h3/a')[0]
         url = link.attrib.get('href')
         parsed_url = urlparse(url)
-        # TODO better google link detection
-        if parsed_url.netloc.find('www.google.com') >= 0:
-            continue
-        title = ' '.join(link.xpath('.//text()'))
-        content = escape(' '.join(result.xpath('.//p[@class="desc"]//text()')))
+        title = link.text_content()
+        content = result.xpath('./p[@class="desc"]')[0].text_content()
         results.append({'url': url, 'title': title, 'content': content})
     return results
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
@@ -1,5 +1,5 @@
 from lxml import html
-from urllib import urlencode
+from urllib import urlencode, unquote
 from urlparse import urlparse, urljoin
 from cgi import escape
 from lxml.etree import _ElementStringResult
@@ -11,32 +11,64 @@ title_xpath = None
 suggestion_xpath = ''
 results_xpath = ''
 
-def extract_url(xpath_results):
-    url = ''
-    parsed_search_url = urlparse(search_url)
+'''
+if xpath_results is list, extract the text from each result and concat the list
+if xpath_results is a xml element, extract all the text node from it ( text_content() method from lxml )
+if xpath_results is a string element, then it's already done
+'''
+def extract_text(xpath_results):
     if type(xpath_results) == list:
+        # it's list of result : concat everything using recursive call
         if not len(xpath_results):
             raise Exception('Empty url resultset')
-        if type(xpath_results[0]) == _ElementStringResult:
-            url = ''.join(xpath_results)
-            if url.startswith('//'):
-                url = parsed_search_url.scheme+url
-            elif url.startswith('/'):
-                url = urljoin(search_url, url)
-            #TODO
-        else:
-            url = xpath_results[0].attrib.get('href')
+        result = ''
+        for e in xpath_results:
+            result = result + extract_text(e)
+        return result
+    elif type(xpath_results) == _ElementStringResult:
+        # it's a string
+        return ''.join(xpath_results)
     else:
-        url = xpath_results.attrib.get('href')
-    if not url.startswith('http://') and not url.startswith('https://'):
-        url = 'http://'+url
+        # it's a element
+        return xpath_results.text_content()
+
+
+def extract_url(xpath_results):
+    url = extract_text(xpath_results)
+
+    if url.startswith('//'):
+        # add http or https to this kind of url //example.com/
+        parsed_search_url = urlparse(search_url)
+        url = parsed_search_url.scheme+url
+    elif url.startswith('/'):
+        # fix relative url to the search engine
+        url = urljoin(search_url, url)
+
+    # normalize url
+    url = normalize_url(url)
+
+    return url
+
+
+def normalize_url(url):
     parsed_url = urlparse(url)
+
+    # add a / at this end of the url if there is no path
     if not parsed_url.netloc:
         raise Exception('Cannot parse url')
     if not parsed_url.path:
         url += '/'
+
+    # FIXME : hack for yahoo
+    if parsed_url.hostname == 'search.yahoo.com' and parsed_url.path.startswith('/r'):
+        p = parsed_url.path
+        mark = p.find('/**')
+        if mark != -1:
+            return unquote(p[mark+3:]).decode('utf-8')
+
     return url
+
 def request(query, params):
     query = urlencode({'q': query})[2:]
     params['url'] = search_url.format(query=query)
@@ -50,15 +82,19 @@ def response(resp):
     if results_xpath:
         for result in dom.xpath(results_xpath):
             url = extract_url(result.xpath(url_xpath))
-            title = ' '.join(result.xpath(title_xpath))
-            content = escape(' '.join(result.xpath(content_xpath)))
+            title = extract_text(result.xpath(title_xpath)[0 ])
+            content = extract_text(result.xpath(content_xpath)[0])
             results.append({'url': url, 'title': title, 'content': content})
     else:
-        for content, url, title in zip(dom.xpath(content_xpath), map(extract_url, dom.xpath(url_xpath)), dom.xpath(title_xpath)):
+        for url, title, content in zip(
+            map(extract_url, dom.xpath(url_xpath)), \
+            map(extract_text, dom.xpath(title_xpath)), \
+            map(extract_text, dom.xpath(content_xpath)), \
+        ):
             results.append({'url': url, 'title': title, 'content': content})
     if not suggestion_xpath:
         return results
     for suggestion in dom.xpath(suggestion_xpath):
-        results.append({'suggestion': escape(''.join(suggestion.xpath('.//text()')))})
+        results.append({'suggestion': extract_text(suggestion)})
    return results