commit: b88146d669b1196ed1efc4ae4108e238cfd7dbca
parent a559bad4884fa9dc60d44cd580812e1e2fedef80
Author: Thomas Pointhuber <thomas.pointhuber@gmx.at>
Date: Fri, 14 Mar 2014 09:55:04 +0100
Show publishedDate for news results (google_news, yahoo_news engines)
Diffstat:
4 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
@@ -2,6 +2,7 @@
from urllib import urlencode
from json import loads
+from datetime import datetime, timedelta
categories = ['news']
@@ -31,7 +32,15 @@ def response(resp):
return []
for result in search_res['responseData']['results']:
+# S.149 (159), library.pdf
+# datetime.strptime("Mon, 10 Mar 2014 16:26:15 -0700", "%a, %d %b %Y %H:%M:%S %z")
+# publishedDate = parse(result['publishedDate'])
+ publishedDate = datetime.strptime(str.join(' ',result['publishedDate'].split(None)[0:5]), "%a, %d %b %Y %H:%M:%S")
+ #utc_offset = timedelta(result['publishedDate'].split(None)[5]) # local = utc + offset
+ #publishedDate = publishedDate + utc_offset
+
results.append({'url': result['unescapedUrl'],
'title': result['titleNoFormatting'],
+ 'publishedDate': publishedDate,
'content': result['content']})
return results
diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py
@@ -4,6 +4,7 @@ from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text, extract_url
from searx.engines.yahoo import parse_url
+from datetime import datetime
categories = ['news']
search_url = 'http://news.search.yahoo.com/search?{query}&b={offset}'
@@ -11,6 +12,7 @@ results_xpath = '//div[@class="res"]'
url_xpath = './/h3/a/@href'
title_xpath = './/h3/a'
content_xpath = './/div[@class="abstr"]'
+publishedDate_xpath = './/span[@class="timestamp"]'
suggestion_xpath = '//div[@id="satat"]//a'
paging = True
@@ -37,7 +39,10 @@ def response(resp):
url = parse_url(extract_url(result.xpath(url_xpath), search_url))
title = extract_text(result.xpath(title_xpath)[0])
content = extract_text(result.xpath(content_xpath)[0])
- results.append({'url': url, 'title': title, 'content': content})
+# Feb 20 04:02am
+ publishedDate = datetime.strptime(extract_text(result.xpath(publishedDate_xpath)[0]),"%b %d %H:%M%p")
+ #publishedDate.replace(year=2014)
+ results.append({'url': url, 'title': title, 'content': content,'publishedDate':publishedDate})
if not suggestion_xpath:
return results
diff --git a/searx/templates/result_templates/default.html b/searx/templates/result_templates/default.html
@@ -6,6 +6,7 @@
<div>
<h3 class="result_title"><a href="{{ result.url }}">{{ result.title|safe }}</a></h3>
+ {% if result.publishedDate %}<p class="published_date">{{ result.publishedDate }}</p>{% endif %}
<p class="content">{% if result.content %}{{ result.content|safe }}<br />{% endif %}</p>
<p class="url">{{ result.pretty_url }}</p>
</div>
diff --git a/searx/webapp.py b/searx/webapp.py
@@ -26,6 +26,7 @@ import json
import cStringIO
import os
+from datetime import datetime, timedelta
from itertools import chain
from flask import (
Flask, request, render_template, url_for, Response, make_response,
@@ -156,6 +157,17 @@ def index():
if engine in favicons:
result['favicon'] = engine
+ # TODO, check if timezone is calculated right
+ if 'publishedDate' in result:
+ if result['publishedDate'].date() == datetime.now().date():
+ timedifference = datetime.now()-result['publishedDate']
+ if timedifference.seconds < 60*60:
+ result['publishedDate'] = '{0:d} minutes ago'.format(timedifference.seconds/60)
+ else:
+ result['publishedDate'] = '{0:d} hours ago'.format(timedifference.seconds/60/60)
+ else:
+ result['publishedDate'] = result['publishedDate'].strftime('%d.%m.%Y')
+
if search.request_data.get('format') == 'json':
return Response(json.dumps({'query': search.query,
'results': search.results}),