
searx

My custom branch(es) on searx, a meta-search engine
commit: c1d7d30b8ec2950a6338f0b99ebe9bdc094fdb73
parent: 239299d45ec7698e45451b617f2ef52bfb2c2e88
Author: asciimoo <asciimoo@gmail.com>
Date:   Tue, 11 Feb 2014 13:13:51 +0100

[mod] len() removed from conditions
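
The idiom behind this commit: Python containers and strings are falsy when
empty, so a guard written as "if not len(x):" can be shortened to "if not x:"
with the same outcome. A minimal before/after sketch using a hypothetical
results list, not code from this repository:

    results = []

    # before: explicit length check
    if not len(results):
        print('no results')

    # after: rely on truthiness; empty list, tuple, dict and str are all falsy
    if not results:
        print('no results')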

Diffstat:

M searx/engines/__init__.py    |  2 +-
M searx/engines/json_engine.py |  2 +-
M searx/engines/startpage.py   |  2 +-
M searx/engines/xpath.py       |  2 +-
M searx/engines/yacy.py        |  6 +++---
M searx/engines/youtube.py     | 10 ++++++----
M searx/search.py              |  6 +++---
M searx/webapp.py              |  6 +++---
8 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
@@ -163,7 +163,7 @@ def score_results(results):
                 duplicated = new_res
                 break
         if duplicated:
-            if len(res.get('content', '')) > len(duplicated.get('content', '')):  # noqa
+            if res.get('content') > duplicated.get('content'):
                 duplicated['content'] = res['content']
             duplicated['score'] += score
             duplicated['engines'].append(res['engine'])
diff --git a/searx/engines/json_engine.py b/searx/engines/json_engine.py
@@ -39,7 +39,7 @@ def parse(query):
 
 def do_query(data, q):
     ret = []
-    if not len(q):
+    if not q:
         return ret
 
     qkey = q[0]
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
@@ -35,7 +35,7 @@ def response(resp):
         title = link.text_content()
 
         content = ''
-        if len(result.xpath('./p[@class="desc"]')):
+        if result.xpath('./p[@class="desc"]'):
             content = result.xpath('./p[@class="desc"]')[0].text_content()
 
         results.append({'url': url, 'title': title, 'content': content})
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
@@ -23,7 +23,7 @@ if xpath_results is a string element, then it's already done
 def extract_text(xpath_results):
     if type(xpath_results) == list:
         # it's list of result : concat everything using recursive call
-        if not len(xpath_results):
+        if not xpath_results:
             raise Exception('Empty url resultset')
         result = ''
         for e in xpath_results:
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
@@ -13,7 +13,7 @@ def request(query, params):
 def response(resp):
     raw_search_results = loads(resp.text)
 
-    if not len(raw_search_results):
+    if not raw_search_results:
         return []
 
     search_results = raw_search_results.get('channels', {})[0].get('items', [])
@@ -26,10 +26,10 @@ def response(resp):
         tmp_result['url'] = result['link']
         tmp_result['content'] = ''
 
-        if len(result['description']):
+        if result['description']:
             tmp_result['content'] += result['description'] + "<br/>"
 
-        if len(result['pubDate']):
+        if result['pubDate']:
             tmp_result['content'] += result['pubDate'] + "<br/>"
 
         if result['size'] != '-1':
diff --git a/searx/engines/youtube.py b/searx/engines/youtube.py
@@ -22,9 +22,10 @@ def response(resp):
     if not 'feed' in search_results:
         return results
     feed = search_results['feed']
+
     for result in feed['entry']:
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
-        if not len(url):
+        if not url:
             return
         # remove tracking
         url = url[0].replace('feature=youtube_gdata', '')
@@ -32,12 +33,13 @@ def response(resp):
             url = url[:-1]
         title = result['title']['$t']
         content = ''
-        thumbnail = ''
-        if len(result['media$group']['media$thumbnail']):
+
+        if result['media$group']['media$thumbnail']:
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
             content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)  # noqa
 
-        if len(content):
+
+        if content:
             content += '<br />' + result['content']['$t']
         else:
             content = result['content']['$t']
diff --git a/searx/search.py b/searx/search.py
@@ -49,7 +49,7 @@ class Search(object):
 
         self.categories = []
 
-        if len(self.engines):
+        if self.engines:
             self.categories = list(set(engine['category']
                                        for engine in self.engines))
         else:
@@ -59,13 +59,13 @@ class Search(object):
                     if not category in categories:
                         continue
                     self.categories.append(category)
-            if not len(self.categories):
+            if not self.categories:
                 cookie_categories = request.cookies.get('categories', '')
                 cookie_categories = cookie_categories.split(',')
                 for ccateg in cookie_categories:
                     if ccateg in categories:
                         self.categories.append(ccateg)
-        if not len(self.categories):
+        if not self.categories:
             self.categories = ['general']
 
         for categ in self.categories:
diff --git a/searx/webapp.py b/searx/webapp.py
@@ -91,7 +91,7 @@ def render(template_name, **kwargs):
         for ccateg in cookie_categories:
             if ccateg in categories:
                 kwargs['selected_categories'].append(ccateg)
-        if not len(kwargs['selected_categories']):
+        if not kwargs['selected_categories']:
             kwargs['selected_categories'] = ['general']
     return render_template(template_name, **kwargs)
 
@@ -150,12 +150,12 @@ def index():
     elif search.request_data.get('format') == 'csv':
         csv = UnicodeWriter(cStringIO.StringIO())
         keys = ('title', 'url', 'content', 'host', 'engine', 'score')
-        if len(search.results):
+        if search.results:
             csv.writerow(keys)
             for row in search.results:
                 row['host'] = row['parsed_url'].netloc
                 csv.writerow([row.get(key, '') for key in keys])
-        csv.stream.seek(0)
+        csv.stream.seek(0)
         response = Response(csv.stream.read(), mimetype='application/csv')
         cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
        response.headers.add('Content-Disposition', cont_disp)
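
One hunk above is not a pure truthiness rewrite: in score_results()
(searx/engines/__init__.py), the new condition compares the content strings
themselves rather than their lengths, so the comparison is lexicographic, and
res.get('content') may be None when the key is missing (Python 2, which searx
targeted at the time, orders None before any string; Python 3 would raise
TypeError). A small sketch with hypothetical values showing how the two
conditions can disagree:

    res = {'content': 'zz'}
    duplicated = {'content': 'a much longer description'}

    # old condition: compares lengths, 2 > 25 -> False
    print(len(res.get('content', '')) > len(duplicated.get('content', '')))

    # new condition: compares strings lexicographically, 'zz' > 'a m...' -> True
    print(res.get('content') > duplicated.get('content'))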