diff --git a/src/ORIGINALindex.py b/src/ORIGINALindex.py
deleted file mode 100644
index 9cd4250..0000000
--- a/src/ORIGINALindex.py
+++ /dev/null
@@ -1,506 +0,0 @@
-import datetime, os, re
-from bottle import error, response, route, run, static_file, template, TEMPLATE_PATH
-
-def clean_tags(raw):
-    cleanr = re.compile('<.*?>')
-    cleantext = re.sub(cleanr, '', raw)
-    return cleantext
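-# Quick illustration of the non-greedy pattern above (sample string is
-# hypothetical): clean_tags('<p>hello <b>world</b></p>') -> 'hello world'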
-
-# rss feed
-
-# RSS Generation
-def make_rss():
-    loc = 'diary/entries/'
-    # list_items writes static/xml/blessfrey.xml as a side effect
-    info = {'items': list_items(gather_and_sort(loc)[0:15])}
-
-# Delete old version
-def clear_file(f_name):
-    if os.path.exists(f_name):
-        print("removing " + f_name)
-        os.remove(f_name)
-    # recreate as an empty file; close immediately so the handle isn't leaked
-    open(f_name, 'a+').close()
-
-def format_rss_time(date):
-    return datetime.datetime.strptime(date, '%y%m%d').strftime('%a, %d %b %Y') + " 05:00:05 GMT"
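-# Entry filenames double as yymmdd datestamps, so e.g. (sample date):
-# format_rss_time('210101') -> 'Fri, 01 Jan 2021 05:00:05 GMT'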
-
-# Return list of items using list of articles
-def list_items(articles):
-    f_name = "static/xml/blessfrey.xml"
-    loc2 = 'https://www.blessfrey.me'
-    loc = 'diary/entries/'
-    result = []
-
-    for article in articles:
-        path = loc + article
-        text = article2list(article, loc)
-        a = []
-        a.append(find_title(text))
-        a.append(find_url(path))
-        a.append(clean_tags(prepare_rss_summary(text, path)))
-        a.append(find_timestamp(text))
-        result.append(a)
-
-    clear_file(f_name)
-    f = open(f_name, 'w')
-    f.write('<?xml version="1.0" encoding="UTF-8" ?>' + '\n')
-    f.write('<rss version="2.0">' + '\n')
-    f.write("<channel>" + '\n')
-    f.write("<title>blessfrey.me</title>" + '\n')
-    f.write("<link>https://www.blessfrey.me/</link>" + '\n')
-    f.write("<description>chimchooree's dev space</description>" + '\n')
-    f.write("<language>en-us</language>" + '\n')
-    f.write("<webMaster>chimchooree@mail.com (chimchooree)</webMaster>" + '\n')
-
-    for r in result:
-        f.write("<item>" + '\n')
-        f.write("<title>" + r[0] + "</title>" + '\n')
-        f.write("<link>" + loc2 + r[1] + "</link>" + '\n')
-        f.write("<description>" + r[2] + "</description>" + '\n')
-        code = r[1].replace(loc, '')
-        code = code.replace('/', '')
-        f.write("<pubDate>" + format_rss_time(code) + "</pubDate>" + '\n')
-        f.write("<guid>" + loc2 + r[1] + "</guid>" + '\n')
-        f.write("</item>" + '\n')
-
-    f.write("</channel>" + '\n')
-    f.write("</rss>" + '\n')
-    f.close()
-
-    return result
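-# Each emitted <item> then looks roughly like this (values illustrative):
-# <item>
-#   <title>a diary title</title>
-#   <link>https://www.blessfrey.me/diary/entries/210101</link>
-#   <description>plain-text summary of the entry</description>
-#   <pubDate>Fri, 01 Jan 2021 05:00:05 GMT</pubDate>
-#   <guid>https://www.blessfrey.me/diary/entries/210101</guid>
-# </item>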
-
-# recommendations
-
-def list_rec(page):
-    loc = 'diary/entries/'
-    result = []
-    if isinstance(page, int):
-        # Collect recommended articles from the entry's first line, assumed
-        # to be an HTML comment of comma-separated IDs, e.g. <!--210101,201225-->
-        with open(loc + str(page)) as f:
-            comment = f.readline()
-        comment = comment.replace('<!--', '')
-        comment = comment.replace('-->', '')
-        comment = comment.replace(' ', '')
-        comment = clean(comment)
-        rec = comment.split(',')
-        # Convert into array for template to display
-        for article in rec:
-            if is_it_time(article):
-                path = loc + article
-                data = []
-                try:
-                    with open(path) as f:
-                        f.readline()
-                        data.append(clean(f.readline().replace(' ', '')))
-                    data.append(path)
-                    result.append(data)
-                except EnvironmentError:
-                    print("No article @ " + path)
-    return result
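-# Usage sketch (hypothetical entry ID): list_rec(210101) yields a list of
-# [title, path] pairs for each recommended entry that is_it_time() accepts,
-# presumably a publish-date gate defined further down the file.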
-
-# List latest 5 articles as headline links
-def list_headlines(articles):
-    loc = 'diary/entries/'
-    result = []
-    for article in articles:
-        path = loc + article
-        b = [path]
-        with open(path) as f:
-            f.readline()
-            text = f.readline()
-        b.append(clean(text.replace(' ', '')))
-        result.append(b)
-    return result
-
-def find_tags(text):
-    # scan the tag line for '#'-prefixed words, skipping blanks left by
-    # doubled spaces
-    words = text[3].replace('\n', '').split(' ')
-    final = [w for w in words if '#' in w]
-    final.sort()
-    return final
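-# e.g. a tag line of '#gamedev #python\n' (illustrative) yields
-# ['#gamedev', '#python']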
-
-# Return title of article, formatted for sharing via social media
-def find_social_title(text):
-    return clean(text[1]).replace(' ', '+')
-
-# Return URL of article
-def find_url(path):
-    return '/' + path.replace('.tpl', '')
-
-# Return clean timestamp
-def find_timestamp(text):
-    return text[2].replace(' ', '')
-
-# Return clean title
-def find_title(text):
-    return clean(text[1])
-
-# Return article as list of lines of text
-def article2list(article, loc):
-    with open(loc + article) as f:
-        text = f.readlines()
-    return text
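-# The find_* helpers index into this list, implying each entry leads with a
-# fixed header: line 1 recommendations, line 2 title, line 3 timestamp,
-# line 4 tags, body afterward.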
-
-def retrieve_article(page, loc):
-    with open(loc + str(page)) as f:
-        text = f.readlines()
-    return ''.join(text)
-
-def retrieve_diary_entry_content(page, loc):
-    with open(loc + str(page)) as f:
-        lines = f.readlines()
-    # everything after the 4 header lines is entry content
-    return ''.join(lines[4:])
-
-def prepare_diary_entry(page, loc):
-    result = []
-    text = article2list(str(page), loc)
-    result.append(find_title(text))
-    result.append(retrieve_diary_entry_content(page, loc))
-    result.append(find_timestamp(text))
-    result.append(find_url(loc + str(page)))
-    result.append(find_social_title(text))
-    result.append(find_tags(text))
-    return result
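-# result is positional: [title, content, timestamp, url, social title, tags],
-# presumably unpacked in that order by the diary template.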
-
-# Return list of snippets using list of articles
-def list_snippets(articles):
-    loc = 'diary/entries/'
-    result = []
-
-    for article in articles:
-        path = loc + article
-        text = article2list(article, loc)
-        a = []
-        a.append(find_title(text))
-        a.append(prepare_article(text, path))
-        a.append(find_timestamp(text))
-        a.append(find_url(path))
-        a.append(find_social_title(text))
-        a.append(find_tags(text))
-        result.append(a)
-    return result
-
-# Return list of files with given tag
-def pull_tag(files, tag):
-    pull = []
-    for f in files:
-        tags = find_tags(article2list(str(f), 'diary/entries/'))
-        if "#" + tag in tags:
-            pull.append(f)
-    pull.sort(reverse=True)
-    return pull
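-# e.g. pull_tag(['210101', '201225'], 'gamedev') keeps only entries whose tag
-# line includes '#gamedev', sorted newest-first (filenames illustrative).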
-
-# Return line count of file
-def count_lines(fname):
-    linenum = 0
-    with open(fname) as f:
-        for linenum, line in enumerate(f, 1):
-            pass
-    return linenum
-
-# Return article text without HTML header
-def find_content(text):
-    # form a string from the body lines, skipping the header
-    return ''.join(text[5:])
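-# Note: find_content starts the body at text[5], one line later than
-# retrieve_diary_entry_content's lines[4:].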
-
-# Snip article and close any open list tags
-def prepare_rss_summary(text, path):
-    content = snip_sentence(find_content(text), path)
-    if content.count('<ul>') > content.count('</ul>'):
-        content += '</ul>'
-    return content
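-# Snipping mid-sentence can cut a list short; the count check appends one
-# closing tag, so only a single unbalanced <ul> level is repaired.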
-
-# Snip article and close any open list tags
-def prepare_article(text, path):
-    content = snip_article(find_content(text), path)
-    if content.count('<ul>') > content.count('</ul>'):