|
|
|
@ -1,6 +1,7 @@
|
|
|
|
|
import datetime, os, re
|
|
|
|
|
from bottle import error, response, route, run, static_file, template, TEMPLATE_PATH
|
|
|
|
|
|
|
|
|
|
# List up to 3 recommended articles for current article
|
|
|
|
|
def list_rec(page):
|
|
|
|
|
loc = 'diary/entries/'
|
|
|
|
|
result = []
|
|
|
|
@ -24,6 +25,7 @@ def list_rec(page):
|
|
|
|
|
result.append(data)
|
|
|
|
|
return result
|
|
|
|
|
|
|
|
|
|
# List latest 5 articles as headline links
|
|
|
|
|
def list_headlines(articles):
|
|
|
|
|
loc = 'diary/entries/'
|
|
|
|
|
result = []
|
|
|
|
@ -39,18 +41,23 @@ def list_headlines(articles):
|
|
|
|
|
result.append(b)
|
|
|
|
|
return result
|
|
|
|
|
|
|
|
|
|
# Return title of article, formatted for sharing via social media
|
|
|
|
|
def find_social_title(text):
    """Return the article title (line 2), cleaned and with spaces
    replaced by '+' so it can be embedded in social-media share links."""
    title = clean(text[1])
    return title.replace(' ', '+')
|
|
|
|
|
|
|
|
|
|
# Return URL of article
|
|
|
|
|
def find_url(path):
    """Return the site URL for an article: strip the '.tpl' template
    suffix and prefix a leading slash."""
    stripped = path.replace('.tpl', '')
    return '/' + stripped
|
|
|
|
|
|
|
|
|
|
# Return clean timestamp
|
|
|
|
|
def find_timestamp(text):
    """Return the article timestamp (line 3) with any '<br>' tags stripped."""
    stamp = text[2]
    return stamp.replace('<br>', '')
|
|
|
|
|
|
|
|
|
|
# Return clean title
|
|
|
|
|
def find_title(text):
    """Return the article title: line 2 of the article, run through clean()."""
    raw_title = text[1]
    return clean(raw_title)
|
|
|
|
|
|
|
|
|
|
# Return article as list of lines of text
|
|
|
|
|
def article2list(article, loc):
|
|
|
|
|
text = []
|
|
|
|
|
with open(loc + article) as f:
|
|
|
|
@ -66,6 +73,7 @@ def retrieve_article(page, loc):
|
|
|
|
|
string += line
|
|
|
|
|
return string
|
|
|
|
|
|
|
|
|
|
# Return list of snippets using list of articles
|
|
|
|
|
def list_snippets(articles):
|
|
|
|
|
loc = 'diary/entries/'
|
|
|
|
|
limit = 4
|
|
|
|
@ -86,17 +94,18 @@ def list_snippets(articles):
|
|
|
|
|
result.append(a)
|
|
|
|
|
return result
|
|
|
|
|
|
|
|
|
|
# Return line count of file
|
|
|
|
|
def count_lines(fname):
    """Return the number of lines in the file *fname*.

    Fix: the original left the loop variable unbound for an empty file,
    raising UnboundLocalError on `return linenum`. Initialize the count
    to 0 so an empty file reports 0 lines.
    """
    count = 0
    with open(fname) as f:
        # enumerate from 1 so the final value of `count` is the line total
        for count, _line in enumerate(f, 1):
            pass
    return count
|
|
|
|
|
|
|
|
|
|
# Return article text without HTML header
|
|
|
|
|
def find_content(text):
|
|
|
|
|
length = len(text)
|
|
|
|
|
content = ""
|
|
|
|
|
# form a string from relevant lines of the article
|
|
|
|
|
#with open(loc + article) as f:
|
|
|
|
|
pos = 0
|
|
|
|
|
for line in text:
|
|
|
|
|
# skip to line 5
|
|
|
|
@ -105,12 +114,14 @@ def find_content(text):
|
|
|
|
|
pos += 1
|
|
|
|
|
return content
|
|
|
|
|
|
|
|
|
|
# Snip article and close any open list tags
|
|
|
|
|
def prepare_article(text, path):
    """Return the snipped article body with unbalanced <ul> tags closed.

    Snipping can cut an article in the middle of one or more lists.
    Fix: the original appended at most one '</ul>' even when several
    lists were left open; append one closing tag per unclosed '<ul>'
    so the resulting HTML stays well-formed.
    """
    content = snip_article(find_content(text), path)
    open_lists = content.count('<ul>') - content.count('</ul>')
    if open_lists > 0:
        content += '</ul>' * open_lists
    return content
|
|
|
|
|
|
|
|
|
|
# Remove links, line breaks from snippet
|
|
|
|
|
def clean(result):
|
|
|
|
|
result = result.replace('\n','')
|
|
|
|
|
result = result.replace('<br>','')
|
|
|
|
@ -120,6 +131,7 @@ def clean(result):
|
|
|
|
|
result = re.sub(r'</h\d>','',result)
|
|
|
|
|
return result
|
|
|
|
|
|
|
|
|
|
# Return first 300 words of article + " ... "
|
|
|
|
|
def snip_article(article, path):
|
|
|
|
|
article = clean(article)
|
|
|
|
|
limit = 300
|
|
|
|
@ -127,10 +139,12 @@ def snip_article(article, path):
|
|
|
|
|
result = result.rsplit(' ',1)[0]
|
|
|
|
|
return result + " ... "
|
|
|
|
|
|
|
|
|
|
# Sort diary - newest to oldest
|
|
|
|
|
def sort_files(files):
    """Order diary entry filenames newest-to-oldest (reverse lexical),
    mutating the list in place and returning it for convenience."""
    files[:] = sorted(files, reverse=True)
    return files
|
|
|
|
|
|
|
|
|
|
# Return list of all diary entries (exclude raws + extras)
|
|
|
|
|
def gather_files(loc):
|
|
|
|
|
files = os.listdir(loc)
|
|
|
|
|
if 'raw' in files:
|
|
|
|
|