@@ -1,6 +1,7 @@
import datetime, os, re
from bottle import error, response, route, run, static_file, template, TEMPLATE_PATH
# List up to 3 recommended articles for current article
def list_rec(page):
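    # entries are .tpl templates stored under diary/entries/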
    loc = 'diary/entries/'
    result = []
@@ -24,6 +25,7 @@ def list_rec(page):
        result.append(data)
    return result
# List latest 5 articles as headline links
def list_headlines(articles):
    loc = 'diary/entries/'
    result = []
@@ -39,18 +41,23 @@ def list_headlines(articles):
        result.append(b)
    return result
# Return title of article, formatted for sharing via social media
def find_social_title(text):
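    # '+' replaces spaces so the title can sit in a share-link query string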
    return clean(text[1]).replace(' ','+')
# Return URL of article
def find_url(path):
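    # the public URL is the template filename with its .tpl extension removed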
    return '/' + path.replace('.tpl','')
# Return clean timestamp
def find_timestamp(text):
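    # assumes the article's third line holds the timestamp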
    return text[2].replace('<br>','')
# Return clean title
def find_title(text):
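    # the article's second line holds the title; clean() strips its markup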
    return clean(text[1])
# Return article as list of lines of text
def article2list(article, loc):
    text = []
    with open(loc + article) as f:
@@ -66,6 +73,7 @@ def retrieve_article(page, loc):
        string += line
    return string
# Return list of snippets using list of articles
def list_snippets(articles):
    loc = 'diary/entries/'
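    # assumption: limit caps how many snippets are built for the page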
    limit = 4
@@ -86,17 +94,18 @@ def list_snippets(articles):
        result.append(a)
    return result
# Return line count of file
def count_lines(fname):
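    # enumerate from 1 and fall through the loop; linenum ends up holding the last
    # line's number (an empty file would leave it unbound)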
    with open(fname) as f:
        for linenum, line in enumerate(f,1):
            pass
    return linenum
# Return article text without HTML header
def find_content(text):
    length = len(text)
    content = ""
    # form a string from relevant lines of the article
    #with open(loc + article) as f:
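    # pos tracks which line of the article the loop is on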
    pos = 0
    for line in text:
        # skip to line 5
@@ -105,12 +114,14 @@ def find_content(text):
        pos += 1
    return content
# Snip article and close any open list tags
def prepare_article(text, path):
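    # if the snippet left a <ul> unclosed, append the closing tag so the markup stays balanced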
    content = snip_article(find_content(text), path)
    if content.count('<ul>') > content.count('</ul>'):
        content += '</ul>'
    return content
# Remove links, line breaks from snippet
def clean(result):
    result = result.replace('\n','')
    result = result.replace('<br>','')
@@ -120,6 +131,7 @@ def clean(result):
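    # strip closing heading tags (</h1>, </h2>, ...) left in the snippet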
    result = re.sub(r'</h\d>','',result)
    return result
# Return first 300 words of article + " ... "
def snip_article(article, path):
    article = clean(article)
    limit = 300
@@ -127,10 +139,12 @@ def snip_article(article, path):
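    # drop the partial word left at the cut point before adding the trailing ellipsis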
    result = result.rsplit(' ',1)[0]
    return result + " ... "
# Sort diary - newest to oldest
def sort_files(files):
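    # assumption: entry filenames begin with the date, so a reverse sort puts newest first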
    files.sort(reverse=True)
    return files
# Return list of all diary entries (exclude raws + extras)
def gather_files(loc):
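    # assumption: the raw folder holds non-entry files and is dropped from the listing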
    files = os.listdir(loc)
    if 'raw' in files: