embedded itch.io game
parent
0ba64fc6c0
commit
fc928148ad
@ -1,506 +0,0 @@
|
|||||||
import datetime, os, re
|
|
||||||
from bottle import error, response, route, run, static_file, template, TEMPLATE_PATH
|
|
||||||
|
|
||||||
def clean_tags(raw):
    """Return *raw* with every <...> HTML tag removed (non-greedy match)."""
    return re.sub('<.*?>', '', raw)
|
|
||||||
|
|
||||||
# rss feed
|
|
||||||
|
|
||||||
# RSS Generation
|
|
||||||
# RSS Generation
def make_rss():
    """Rebuild the RSS feed from the 15 newest diary entries.

    list_items() writes static/xml/blessfrey.xml as a side effect; the
    returned item rows are collected but not otherwise used here.
    """
    newest = gather_and_sort('diary/entries/')[0:15]
    info = {'items': list_items(newest)}
|
|
||||||
|
|
||||||
# Delete old version
|
|
||||||
# Delete old version
def clear_file(f_name):
    """Remove *f_name* if it exists, then ensure an empty file is present.

    Bug fix: the original kept the handle from open(f_name, 'a+') in a
    local and never closed it, leaking a file descriptor per call.
    NOTE(review): the flattened source makes it ambiguous whether the
    re-create step was inside the `if`; 'a+' (create-if-missing) suggests
    it runs unconditionally — confirm against callers.
    """
    if os.path.exists(f_name):
        print("removing " + f_name)
        os.remove(f_name)
    # 'a+' matches the original mode; the file ends up empty either way.
    with open(f_name, 'a+'):
        pass
|
|
||||||
|
|
||||||
def format_rss_time(date):
    """Convert a 'YYMMDD' entry code to an RFC-822-style pubDate string.

    The time-of-day portion is a fixed dummy value. The original parsed
    the same string twice with strptime; parse once and format once.
    """
    parsed = datetime.datetime.strptime(date, '%y%m%d')
    return parsed.strftime('%a, %d %b %Y') + " 05:00:05 GMT"
|
|
||||||
|
|
||||||
# Return list of items using list of articles
|
|
||||||
# Return list of items using list of articles
def list_items(articles):
    """Write static/xml/blessfrey.xml from *articles* and return the rows.

    Each row is [title, url, summary, timestamp]. Fixes: the output file
    is now opened with `with` (the original leaked the handle if a write
    raised); the unused locals `loc3`, `text`, and `length` are removed.
    """
    f_name = "static/xml/blessfrey.xml"
    site = 'https://www.blessfrey.me'
    loc = 'diary/entries/'
    result = []

    for article in articles:
        path = loc + article
        text = article2list(article, loc)
        result.append([
            find_title(text),
            find_url(path),
            clean_tags(prepare_rss_summary(text, path)),
            find_timestamp(text),
        ])

    clear_file(f_name)
    with open(f_name, 'w') as f:
        f.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>" + '\n')
        f.write("<rss version=\"2.0\">" + '\n')
        f.write("<channel>" + '\n')
        f.write("<title>blessfrey.me</title>" + '\n')
        f.write("<link>https://www.blessfrey.me/</link>" + '\n')
        f.write("<description>chimchooree's dev space</description>" + '\n')
        f.write("<language>en-us</language>" + '\n')
        f.write("<webMaster>chimchooree@mail.com (chimchooree)</webMaster>" + '\n')

        for r in result:
            f.write("<item>" + '\n')
            f.write("<title>" + r[0] + "</title>" + '\n')
            f.write("<link>" + site + r[1] + "</link>" + '\n')
            f.write("<description>" + r[2] + "</description>" + '\n')
            # The entry code is the URL with path components stripped off.
            code = r[1].replace(loc, '').replace('/', '')
            f.write("<pubDate>" + format_rss_time(code) + "</pubDate>" + '\n')
            f.write("<guid>" + site + r[1] + "</guid>" + '\n')
            f.write("</item>" + '\n')

        f.write("</channel>" + '\n')
        f.write("</rss>" + '\n')

    return result
|
|
||||||
|
|
||||||
# recommendations
|
|
||||||
|
|
||||||
def list_rec(page):
    """Collect recommended-article links for diary entry *page*.

    The first line of an entry file is an HTML comment holding a
    comma-separated list of entry codes; every published code becomes a
    [headline, path] pair for the template. Non-int pages yield [].
    """
    loc = 'diary/entries/'
    result = []
    if not isinstance(page, int):
        return result
    # Pull the recommendation list out of the leading comment line.
    with open('diary/entries/' + str(page)) as f:
        comment = f.readline()
    for token in ('<!--', '-->', ' '):
        comment = comment.replace(token, '')
    comment = clean(comment)
    # Convert into array for template to display.
    for article in comment.split(','):
        if not is_it_time(article):
            continue
        path = loc + article
        try:
            with open(path) as f:
                f.readline()  # skip the comment line
                headline = clean(f.readline().replace('<br>', ''))
            result.append([headline, path])
        except EnvironmentError:
            print("No article @ " + path)
    return result
|
|
||||||
|
|
||||||
# List latest 5 articles as headline links
|
|
||||||
# List latest 5 articles as headline links
def list_headlines(articles):
    """Return [path, headline] pairs for the given entry filenames."""
    loc = 'diary/entries/'
    result = []
    for article in articles:
        path = loc + article
        with open(path) as f:
            f.readline()  # skip the recommendation comment line
            headline = clean(f.readline().replace('<br>', ''))
        result.append([path, headline])
    return result
|
|
||||||
|
|
||||||
def find_tags(text):
    """Return the sorted '#tag' tokens from an article's tag line (text[3]).

    Bug fix: the original removed empty tokens from the list while
    iterating it, which skips the element after each removal — a tag
    immediately following a double space could be silently dropped.
    """
    tokens = text[3].replace('<br>', '').replace('\n', '').split(" ")
    final = [token for token in tokens if '#' in token]
    final.sort()
    return final
|
|
||||||
|
|
||||||
# Return title of article, formatted for sharing via social media
|
|
||||||
# Return title of article, formatted for sharing via social media
def find_social_title(text):
    """Return the cleaned headline (text[1]) with spaces turned into '+'."""
    title = clean(text[1])
    return title.replace(' ', '+')
|
|
||||||
|
|
||||||
# Return URL of article
|
|
||||||
# Return URL of article
def find_url(path):
    """Map an entry file path to its site-relative URL (drops any .tpl)."""
    return '/' + path.replace('.tpl', '')
|
|
||||||
|
|
||||||
# Return clean timestamp
|
|
||||||
# Return clean timestamp
def find_timestamp(text):
    """Return the timestamp line (text[2]) without its trailing <br>."""
    stamp = text[2]
    return stamp.replace('<br>', '')
|
|
||||||
|
|
||||||
# Return clean title
|
|
||||||
# Return clean title
def find_title(text):
    """Return the headline line (text[1]) stripped of markup."""
    return clean(text[1])
|
|
||||||
|
|
||||||
# Return article as list of lines of text
|
|
||||||
# Return article as list of lines of text
def article2list(article, loc):
    """Read entry file *loc* + *article* and return its lines as a list."""
    with open(loc + article) as f:
        return f.readlines()
|
|
||||||
|
|
||||||
def retrieve_article(page, loc):
    """Return the full text of entry *page* under *loc* as one string.

    f.read() yields exactly the same string as the original's line-by-line
    += loop, without the quadratic concatenation.
    """
    with open(loc + str(page)) as f:
        return f.read()
|
|
||||||
|
|
||||||
def retrieve_diary_entry_content(page, loc):
    """Return entry *page*'s body: everything from the fifth line onward.

    Bug fix: the original located each line with lines.index(line), which
    returns the FIRST occurrence — any body line that duplicated one of
    the four header lines (e.g. a repeated blank line) was silently
    dropped. It was also O(n^2).
    """
    with open(loc + str(page)) as f:
        lines = f.readlines()
    return ''.join(lines[4:])
|
|
||||||
|
|
||||||
def prepare_diary_entry(page, loc):
    """Assemble the template fields for one diary entry.

    Returns [title, body, timestamp, url, social title, tags].
    Fix: the original opened the entry file here without ever using the
    handle (article2list and retrieve_diary_entry_content reopen it
    themselves); a missing file still raises, just from article2list.
    """
    text = article2list(str(page), loc)
    return [
        find_title(text),
        retrieve_diary_entry_content(page, loc),
        find_timestamp(text),
        find_url(loc + str(page)),
        find_social_title(text),
        find_tags(text),
    ]
|
|
||||||
|
|
||||||
# Return list of snippets using list of articles
|
|
||||||
# Return list of snippets using list of articles
def list_snippets(articles):
    """Build the diary-page snippet rows for *articles*.

    Each row is [title, snippet, timestamp, url, social title, tags].
    The original's unused locals (limit, total, length, the pre-declared
    text list) are removed; behavior is otherwise unchanged.
    """
    loc = 'diary/entries/'
    result = []
    for article in articles:
        path = loc + article
        text = article2list(article, loc)
        result.append([
            find_title(text),
            prepare_article(text, path),
            find_timestamp(text),
            find_url(path),
            find_social_title(text),
            find_tags(text),
        ])
    return result
|
|
||||||
|
|
||||||
# Return list of files with given tag
|
|
||||||
# Return list of files with given tag
def pull_tag(files, tag):
    """Return the entries in *files* tagged with '#'+tag, newest first."""
    wanted = "#" + tag
    pull = [f for f in files
            if wanted in find_tags(article2list(str(f), 'diary/entries/'))]
    pull.sort(reverse=True)
    return pull
|
|
||||||
|
|
||||||
# Return line count of file
|
|
||||||
# Return line count of file
def count_lines(fname):
    """Return the number of lines in *fname*.

    Bug fix: the original never assigned linenum for an empty file and
    raised UnboundLocalError; an empty file now counts as 0 lines.
    """
    with open(fname) as f:
        linenum = 0
        for linenum, _ in enumerate(f, 1):
            pass
    return linenum
|
|
||||||
|
|
||||||
# Return article text without HTML header
|
|
||||||
# Return article text without HTML header
def find_content(text):
    """Return the article body: every line after the first five.

    Equivalent to the original's position loop — `pos > 4` keeps indices
    5 and up, and `pos < length` was always true.
    """
    return "".join(text[5:])
|
|
||||||
|
|
||||||
# Snip article and close any open list tags
|
|
||||||
# Snip article and close any open list tags
def prepare_rss_summary(text, path):
    """Return the short RSS blurb for an article, balancing any open <ul>."""
    content = snip_sentence(find_content(text), path)
    # A summary cut mid-list needs its <ul> closed to stay valid markup.
    if content.count('<ul>') > content.count('</ul>'):
        content += '</ul>'
    return content
|
|
||||||
|
|
||||||
# Snip article and close any open list tags
|
|
||||||
# Snip article and close any open list tags
def prepare_article(text, path):
    """Return the diary-page snippet for an article, balancing any open <ul>."""
    content = snip_article(find_content(text), path)
    # A snippet cut mid-list needs its <ul> closed to stay valid markup.
    if content.count('<ul>') > content.count('</ul>'):
        content += '</ul>'
    return content
|
|
||||||
|
|
||||||
# Remove links, line breaks from snippet
|
|
||||||
# Remove links, line breaks from snippet
def clean(result):
    """Strip newlines, breaks, links, images, and basic tags from a line."""
    # Same substitutions, same order as before; the literal tokens are
    # valid regular expressions too, so everything goes through re.sub.
    patterns = (
        r'\n',
        '<br>',
        r'<a href=.*?>',
        r'<img src=.*?>',
        r'<a target="_blank" href=.*?>',
        '</a>',
        r'<h\d>',
        r'</h\d>',
        '<center>',
        '</center>',
        '<b>',
        '</b>',
    )
    for pattern in patterns:
        result = re.sub(pattern, '', result)
    return result
|
|
||||||
|
|
||||||
# Return first two sentences of article + " ... "
|
|
||||||
# Return first two sentences of article + " ... "
def snip_sentence(article, path):
    """Return up to ~100 characters of *article*, cut at a word boundary,
    with a trailing ellipsis. *path* is unused but kept for the shared
    call signature with snip_article."""
    snippet = clean(article)[:100]
    snippet = snippet.rsplit(' ', 1)[0]
    return snippet + " ... "
|
|
||||||
|
|
||||||
# Return first 300 words of article + " ... "
|
|
||||||
# Return first 300 words of article + " ... "
def snip_article(article, path):
    """Return up to ~300 characters of *article*, cut at a word boundary,
    with a trailing ellipsis. *path* is unused but kept for the shared
    call signature with snip_sentence."""
    snippet = clean(article)[:300]
    snippet = snippet.rsplit(' ', 1)[0]
    return snippet + " ... "
|
|
||||||
|
|
||||||
# Sort diary - newest to oldest
|
|
||||||
# Sort diary - newest to oldest
def sort_files(files):
    """Sort entry filenames in place, newest first, and return the list."""
    files.sort(reverse=True)
    return files
|
|
||||||
|
|
||||||
def curate_files(files):
    """Drop the non-entry folders and unpublished entries from *files*.

    Bug fix: the original named its accumulator `clean`, shadowing the
    module-level clean() helper inside this function.
    """
    # remove folders
    if 'raw' in files:
        files.remove('raw')
    if 'extra' in files:
        files.remove('extra')
    # keep only entries whose publish date has arrived
    return [f for f in files if is_it_time(f)]
|
|
||||||
|
|
||||||
def is_it_time(date):
    """Return True when entry code *date* (YYMMDD) is today or earlier."""
    today = datetime.datetime.now()
    # Same YYMMDD shape the entry filenames use.
    today_string = today.strftime("%y%m%d")
    return int(date) <= int(today_string)
|
|
||||||
|
|
||||||
# Return list of all diary entries (exclude raws + extras)
|
|
||||||
# Return list of all diary entries (exclude raws + extras)
def gather_files(loc):
    """Return the raw directory listing of *loc* (curation happens later)."""
    return os.listdir(loc)
|
|
||||||
|
|
||||||
def gather_and_sort(loc):
    """List, curate, and newest-first sort the entries under *loc*."""
    files = gather_files(loc)
    return sort_files(curate_files(files))
|
|
||||||
|
|
||||||
def fill_box(new_file):
    """Return the lines of *new_file*, sorted alphabetically."""
    with open(new_file) as f:
        box = sorted(f)
    return box
|
|
||||||
|
|
||||||
# return list of diary entry tags, sorted by frequency
|
|
||||||
# return list of diary entry tags, sorted by frequency
def fill_word_cloud(files):
    """Return up to 24 tags across *files*, most frequent first.

    Keeps the original tie-break: whichever tag max() meets first in the
    set wins when counts are equal.
    """
    tags = []
    for f in files:
        tags.extend(find_tags(article2list(str(f), 'diary/entries/')))
    tags.sort()
    cloud = []
    for _ in range(24):
        if tags:
            top = max(set(tags), key=tags.count)
            cloud.append(top)
            tags = [t for t in tags if t != top]
    return cloud
|
|
||||||
|
|
||||||
def find_year():
    """Return the current four-digit year as a string (for page footers)."""
    return datetime.datetime.now().strftime('%Y')
|
|
||||||
|
|
||||||
## Static ##
|
|
||||||
|
|
||||||
# Serve CSS
|
|
||||||
# Serve CSS
@route('/static/css/<filename:path>')
def serve_css(filename):
    """Serve stylesheet assets from static/css."""
    return static_file(filename, root='static/css')
|
|
||||||
|
|
||||||
# Serve images
|
|
||||||
# Serve images
@route('/static/img/<filename:path>')
def serve_img(filename):
    """Serve image assets from static/img."""
    return static_file(filename, root='static/img')
|
|
||||||
|
|
||||||
# Serve unlisted articles
|
|
||||||
# Serve unlisted articles
# (raw string avoids the invalid '\.' escape warning; the value is identical)
@route(r'/static/extra/<filename:re:.*\.cpp>')
def serve_extra(filename):
    """Serve .cpp files from static/extra as plain-text downloads."""
    return static_file(filename, root='static/extra', mimetype='text/plain', download=True)
|
|
||||||
|
|
||||||
# Serve XML
|
|
||||||
# Serve XML (e.g. the RSS feed)
@route('/static/xml/<filename:path>')
def serve_xml(filename):
    """Serve XML files from static/xml with a text/xml mimetype."""
    return static_file(filename, root='static/xml', mimetype='text/xml')
|
|
||||||
|
|
||||||
## Routes ##
|
|
||||||
|
|
||||||
# Error Page
|
|
||||||
# Error Page
@error(404)
def error404(error):
    """404 handler; also reused directly by entry() for unpublished pages."""
    return "unfortunately, a 404 error. the page you're searching for doesn't exist. (or am I just hiding it for now?) try another page! "
|
|
||||||
@error(500)
def error500(error):
    """500 handler with a link back home."""
    return "unfortunately, a 500 error. something is wrong with the page you're trying to find, if it exists at all. try another page! <a href=https://www.blessfrey.me/>return to blessfrey.me.</a>"
|
|
||||||
@error(502)
def error502(error):
    """502 handler shown during maintenance windows."""
    return "unfortunately, a 502 error. this was likely due to website maintenance. usually it'll be back up before you finish reading this, but otherwise, I'll notice something's wrong soon! <a href=https://www.blessfrey.me/>return to blessfrey.me.</a>"
|
|
||||||
|
|
||||||
# Downloads
|
|
||||||
# Downloads
@route('/download/<filename:path>')
def download(filename):
    """Force-download a file from static/extra under its own name."""
    return static_file(filename, root='static/extra', download=filename)
|
|
||||||
|
|
||||||
# Home Page - Index Template
|
|
||||||
# Home Page - Index Template
@route('/')
def index():
    """home page"""
    loc = 'diary/entries/'
    info = {
        'css': 'index',
        'news': list_headlines(gather_and_sort(loc)[0:10]),
        'title': 'chimchooree\'s dev space - blessfrey',
        'year': find_year(),
    }
    return template('index.tpl', info)
|
|
||||||
|
|
||||||
# Projects Page - Game Template - system, character, story info
|
|
||||||
# Projects Page - Game Template - system, character, story info
@route('/projects')
def projects():
    """projects page"""
    info = {
        'css': 'projects',
        'title': 'chimchooree projects',
        'year': find_year(),
    }
    return template('projects.tpl', info)
|
|
||||||
|
|
||||||
# Presskit Page - Presskit Template - product, developer info
|
|
||||||
# Presskit Page - Presskit Template - product, developer info
@route('/presskit')
def presskit():
    """press page"""
    info = {
        'css': 'presskit',
        'title': 'blessfrey - presskit',
        'year': find_year(),
    }
    return template('presskit.tpl', info)
|
|
||||||
|
|
||||||
# Start on first Diary page if no page given
|
|
||||||
# Start on first Diary page if no page given
@route('/diary')
def diary2():
    """Serve diary page 0 when no page number is given."""
    return diary(0)
|
|
||||||
# Slash is optional
|
|
||||||
# Slash is optional
@route('/diary/')
def diary3():
    """Serve diary page 0 for the trailing-slash variant."""
    return diary(0)
|
|
||||||
|
|
||||||
# Diary Page - Diary Template - list all articles
|
|
||||||
# Diary Page - Diary Template - list all articles
@route('/diary/<page:int>')
def diary(page):
    """diary page"""
    loc = 'diary/entries/'
    assert isinstance(page, int)
    info = {
        'css': 'diary',
        'title': 'blessfrey - developer diary',
        'year': find_year(),
        'snippets': list_snippets(gather_and_sort(loc)),
        'latest': list_headlines(gather_and_sort(loc)[0:5]),
        'tags': fill_word_cloud(curate_files(gather_files(loc))),
        'total': len(curate_files(gather_files(loc))),
        'limit': 8,
        'cluster': 3,
        'page': page,
    }
    return template('diary.tpl', info)
|
|
||||||
|
|
||||||
# Entry Page - Feature Template - for articles
|
|
||||||
# Entry Page - Feature Template - for articles
@route('/diary/entries/<page:int>')
def entry(page):
    """Serve one diary entry; unpublished (future-dated) entries 404."""
    if not is_it_time(page):
        return error404(404)
    loc = 'diary/entries/'
    info = {'css': 'feature', 'title': 'blessfrey - developer diary', 'year': find_year(), 'entry': prepare_diary_entry(page, loc), 'recommends': list_rec(page), 'articles': "Articles", 'latest': list_headlines(gather_and_sort(loc)[0:5]), 'tags': fill_word_cloud(curate_files(gather_files(loc))), 'page': page}
    abs_app_dir_path = os.path.dirname(os.path.realpath(__file__))
    abs_views_path = os.path.join(abs_app_dir_path, 'views')
    # Bug fix: the original inserted unconditionally, growing TEMPLATE_PATH
    # by one entry per request for the life of the process.
    if abs_views_path not in TEMPLATE_PATH:
        TEMPLATE_PATH.insert(0, abs_views_path)
    return template(os.path.join(abs_views_path, 'feature.tpl'), info)
|
|
||||||
|
|
||||||
# Extra Page - Feature Template - for unlisted articles
|
|
||||||
# Extra Page - Feature Template - for unlisted articles
@route('/diary/entries/extra/<page>')
def extra(page):
    """Serve an unlisted diary extra."""
    loc = 'diary/entries/extra/'
    info = {'css': 'feature', 'title': 'blessfrey - developer diary', 'year': find_year(), 'entry': retrieve_article(page, loc), 'recommends': list_rec(page), 'articles': "Articles", 'latest': list_headlines(gather_and_sort(loc)[0:5]), 'tags': fill_word_cloud(curate_files(gather_files(loc))), 'page': page}
    abs_app_dir_path = os.path.dirname(os.path.realpath(__file__))
    abs_views_path = os.path.join(abs_app_dir_path, 'views')
    # Bug fix: the original inserted unconditionally, growing TEMPLATE_PATH
    # by one entry per request for the life of the process.
    if abs_views_path not in TEMPLATE_PATH:
        TEMPLATE_PATH.insert(0, abs_views_path)
    return template(os.path.join(abs_views_path, 'feature.tpl'), info)
|
|
||||||
|
|
||||||
# Start on first Diary tag page if no page given
|
|
||||||
# Start on first Diary tag page if no page given
@route('/diary/tag/<tagin>')
def tag2(tagin):
    """Serve page 0 of a tag listing when no page number is given."""
    return tag(tagin, 0)
|
|
||||||
|
|
||||||
# Tag Page - Diary Tag Template - list all articles for tag
|
|
||||||
# Tag Page - Diary Tag Template - list all articles for tag
@route('/diary/tag/<tagin>/<page:int>')
def tag(tagin, page):
    """tag page"""
    loc = 'diary/entries/'
    assert isinstance(tagin, str)
    assert isinstance(page, int)
    info = {
        'css': 'diary',
        'title': 'blessfrey - developer diary',
        'year': find_year(),
        'snippets': list_snippets(pull_tag(gather_and_sort(loc), tagin)),
        'latest': list_headlines(gather_and_sort(loc)[0:5]),
        'tags': fill_word_cloud(curate_files(gather_files(loc))),
        'total': len(curate_files(gather_files(loc))),
        'limit': 8,
        'cluster': 3,
        'page': page,
    }
    return template('diary.tpl', info)
|
|
||||||
|
|
||||||
# Personal Page - Box Template
|
|
||||||
# Personal Page - Box Template
@route('/box')
def box():
    """personal page"""
    info = {
        'css': 'box',
        'title': 'chimchooree\'s personal page',
        'year': find_year(),
    }
    return template('box.tpl', info)
|
|
||||||
|
|
||||||
# Credits Page - Credits Template
|
|
||||||
# Credits Page - Credits Template
@route('/credits')
def credits():
    """credits page"""
    info = {
        'css': 'contact',
        'title': 'blessfrey - credits',
        'year': find_year(),
    }
    return template('credits.tpl', info)
|
|
||||||
|
|
||||||
# Contact Page - Contact Template
|
|
||||||
# Contact Page - Contact Template
@route('/contact')
def contact():
    """contact page"""
    info = {
        'css': 'contact',
        'title': 'blessfrey - contact chimchooree',
        'year': find_year(),
    }
    return template('contact.tpl', info)
|
|
||||||
|
|
||||||
# Idea Box Page - Box Template
|
|
||||||
# Idea Box Page - Box Template
@route('/ideabox')
def ideabox():
    """idea box page"""
    info = {
        'css': 'box',
        'title': 'blessfrey - idea box - a collection of inspiring concepts',
        'words': fill_box('diary/entries/extra/ideabox'),
        'limit': 5,
        'year': find_year(),
    }
    return template('ideabox.tpl', info)
|
|
||||||
|
|
||||||
# Task Box Page - Box Template
|
|
||||||
# Task Box Page - Box Template
@route('/taskbox')
def taskbox():
    """task box page"""
    info = {
        'css': 'box',
        'title': 'blessfrey - task box - everything needed to complete blessfrey',
        'game_words': fill_box('diary/entries/extra/taskbox'),
        'web_words': fill_box('diary/entries/extra/websitebox'),
        'limit': 5,
        'year': find_year(),
    }
    return template('taskbox.tpl', info)
|
|
||||||
|
|
||||||
## Main ##
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Rebuild the RSS feed once at startup, then serve on localhost:9001
    # (a reverse proxy is expected to sit in front of this).
    make_rss()
    run(host='127.0.0.1', port=9001)
|
|
Loading…
Reference in New Issue