commit: a508d540ac43196edeb4d946dfdf64a1d0a438ed
parent: 91f9973227e003604e036278e42a4c6394bf5478
Author: Thomas Pointhuber <thomas.pointhuber@gmx.at>
Date: Tue, 16 Dec 2014 17:26:16 +0100
[fix] pep8
Diffstat:
6 files changed, 35 insertions(+), 17 deletions(-)
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
@@ -57,12 +57,16 @@ def response(resp):
link = result.xpath('.//div[@class="newstitle"]/a')[0]
url = link.attrib.get('href')
title = ' '.join(link.xpath('.//text()'))
- contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
+ contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
+ '//span[@class="sn_snip"]//text()')
if contentXPath is not None:
content = escape(' '.join(contentXPath))
# parse publishedDate
- publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
+ publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
+ '//span[contains(@class,"sn_ST")]'
+ '//span[contains(@class,"sn_tm")]'
+ '//text()')
if publishedDateXPath is not None:
publishedDate = escape(' '.join(publishedDateXPath))
@@ -74,7 +78,8 @@ def response(resp):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))
- elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+ elif re.match("^[0-9]+ hour(s|),"
+ " [0-9]+ minute(s|) ago$", publishedDate):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))\
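The two long XPath expressions above are shortened by relying on Python's implicit concatenation of adjacent string literals inside the call's parentheses, so no '+' or backslash is needed and the expression handed to lxml is unchanged. A minimal, standalone illustration (not part of the patch):

    # Adjacent string literals are joined at compile time, so the split
    # form is identical to the original one-line XPath.
    xpath_split = ('.//div[@class="sn_txt"]/div'
                   '//span[@class="sn_snip"]//text()')
    xpath_single = './/div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()'
    assert xpath_split == xpath_single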
diff --git a/searx/engines/faroo.py b/searx/engines/faroo.py
@@ -22,10 +22,17 @@ api_key = None
# search-url
url = 'http://www.faroo.com/'
-search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
+search_url = url + 'api?{query}'\
+ '&start={offset}'\
+ '&length={number_of_results}'\
+ '&l={language}'\
+ '&src={categorie}'\
+ '&i=false'\
+ '&f=json'\
+ '&key={api_key}' # noqa
search_category = {'general': 'web',
- 'news': 'news'}
+ 'news': 'news'}
# do search-request
@@ -80,8 +87,8 @@ def response(resp):
# parse results
for result in search_res['results']:
if result['news']:
- # timestamp (how many milliseconds have passed between now and the beginning of 1970)
- publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)
+ # timestamp (milliseconds since 1970)
+ publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0) # noqa
# append news result
results.append({'url': result['url'],
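The rebuilt faroo search_url keeps one literal per query parameter, joined with backslash continuations; the placeholder names are unchanged, so the engine's existing str.format() call produces the same URL as before. A sketch of that expansion (the offset, language and key values below are invented for illustration, not the engine defaults):

    url = 'http://www.faroo.com/'
    search_url = url + 'api?{query}'\
        '&start={offset}'\
        '&length={number_of_results}'\
        '&l={language}'\
        '&src={categorie}'\
        '&i=false'\
        '&f=json'\
        '&key={api_key}'

    # Illustrative values only:
    print(search_url.format(query='q=searx', offset=1, number_of_results=10,
                            language='en', categorie='web', api_key='KEY'))
    # http://www.faroo.com/api?q=searx&start=1&length=10&l=en&src=web&i=false&f=json&key=KEY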
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
@@ -9,7 +9,7 @@
# @stable yes (but deprecated)
# @parse url, title, img_src
-from urllib import urlencode,unquote
+from urllib import urlencode, unquote
from json import loads
# engine dependent config
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
@@ -1,8 +1,8 @@
## Kickass Torrent (Videos, Music, Files)
-#
+#
# @website https://kickass.so
# @provide-api no (nothing found)
-#
+#
# @using-api no
# @results HTML (using search portal)
# @stable yes (HTML can change)
@@ -13,7 +13,6 @@ from cgi import escape
from urllib import quote
from lxml import html
from operator import itemgetter
-from dateutil import parser
# engine dependent config
categories = ['videos', 'music', 'files']
@@ -33,7 +32,8 @@ def request(query, params):
params['url'] = search_url.format(search_term=quote(query),
pageno=params['pageno'])
- # FIX: SSLError: hostname 'kickass.so' doesn't match either of '*.kickass.to', 'kickass.to'
+ # FIX: SSLError: hostname 'kickass.so'
+ # doesn't match either of '*.kickass.to', 'kickass.to'
params['verify'] = False
return params
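Setting params['verify'] = False works around the certificate mismatch noted in the comment: the request parameters built here are passed through to python-requests, where verify=False disables TLS certificate verification for that request. A standalone sketch of the equivalent call (the URL is illustrative):

    import requests

    # verify=False skips certificate verification, so the *.kickass.to
    # certificate served by kickass.so no longer aborts the request.
    resp = requests.get('https://kickass.so/json.php?q=searx', verify=False)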
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
@@ -20,7 +20,12 @@ guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
# search-url
url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'
+search_url = url + 'search?{query}'\
+ '&facet=model'\
+ '&limit=20'\
+ '&offset={offset}'\
+ '&linked_partitioning=1'\
+ '&client_id={client_id}' # noqa
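The soundcloud URL is split the same way as the faroo one, and both keep a trailing # noqa marker: flake8-style checkers skip any warning reported on a line carrying that comment, the usual way to acknowledge a line that is deliberately left as it is. A tiny illustration (the long URL is invented):

    short_url = 'https://api.example.com/search'  # passes the checker as-is
    long_url = 'https://api.example.com/search?with=a&really=long&query=string&that=would&trip=the-length-check'  # noqa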
# do search-request
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
@@ -20,7 +20,8 @@ paging = True
language_support = True
# search-url
-search_url = 'https://search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+base_url = 'https://search.yahoo.com/'
+search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
# specific xpath variables
results_xpath = '//div[@class="res"]'
@@ -57,9 +58,9 @@ def request(query, params):
else:
language = params['language'].split('_')[0]
- params['url'] = search_url.format(offset=offset,
- query=urlencode({'p': query}),
- lang=language)
+ params['url'] = base_url + search_url.format(offset=offset,
+ query=urlencode({'p': query}),
+ lang=language)
# TODO required?
params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
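Splitting the Yahoo URL into base_url plus a relative search_url leaves the request URL identical, since request() now concatenates the two parts before formatting, as the changed hunk above shows. A standalone sketch (Python 2 urllib, matching the engine's import; the query, offset and language values are invented):

    from urllib import urlencode

    base_url = 'https://search.yahoo.com/'
    search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'

    print(base_url + search_url.format(query=urlencode({'p': 'searx'}),
                                       offset=1, lang='en'))
    # https://search.yahoo.com/search?p=searx&b=1&fl=1&vl=lang_en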