From 07a026e93f42ce55fe15d1803d0f2c346719dba6 Mon Sep 17 00:00:00 2001 From: gauthiier Date: Wed, 17 Jul 2019 12:55:47 +0200 Subject: [PATCH] index +search info --- archive.py | 2 +- lists/crawl.py | 4 +- lists/mhonarc_nettime.py | 257 ++++++++++++++++++++------------------ www/archives.py | 79 ------------ www/routes.py | 5 + www/static/CYBERPLA.GIF | Bin 0 -> 527 bytes www/static/cover.gif | Bin 0 -> 4789 bytes www/static/favicon.ico | Bin 0 -> 894 bytes www/static/ooo.js | 17 +++ www/static/search.js | 7 +- www/templates/index.html | 11 +- www/templates/search.html | 14 +++ 12 files changed, 186 insertions(+), 210 deletions(-) delete mode 100644 www/archives.py create mode 100644 www/static/CYBERPLA.GIF create mode 100644 www/static/cover.gif create mode 100644 www/static/favicon.ico create mode 100644 www/static/ooo.js diff --git a/archive.py b/archive.py index 18698bf..4dc0652 100644 --- a/archive.py +++ b/archive.py @@ -15,7 +15,7 @@ def run(args): i = 0 for u in args.url: name = args.names[i] if i < len(args.names) else None - lists.crawl.crawl(u, name, args.arch) + lists.crawl.crawl(url=u, name=name, sublist_name=name, archive_dir=args.arch) #<-- sublist for nettime i = i + 1 sys.exit() diff --git a/lists/crawl.py b/lists/crawl.py index 73529e2..4aaf016 100644 --- a/lists/crawl.py +++ b/lists/crawl.py @@ -26,7 +26,9 @@ def crawl(url, name, sublist_name=None, archive_dir="archives"): # special case -- nettime. # the name should be the sublist_name (i.e nettime-l) elif "nettime" in name: - mhonarc_nettime.collect_from_url(url, name, name, archive_dir) + if sublist_name is None: + sublist_name = name + mhonarc_nettime.collect_from_url(url, name, sublist_name, archive_dir) else: print('mhonarc?') diff --git a/lists/mhonarc_nettime.py b/lists/mhonarc_nettime.py index 5476f37..6380c75 100644 --- a/lists/mhonarc_nettime.py +++ b/lists/mhonarc_nettime.py @@ -6,125 +6,132 @@ DELAY = 0.2 def collect_from_url(url, name, sublist_name, base_archive_dir="archives", mbox=False): - response = urllib.request.urlopen(url) - html = response.read() - soup = BeautifulSoup(html, "html5lib") + response = urllib.request.urlopen(url) + html = response.read() + soup = BeautifulSoup(html, "html5lib") - # base url - base_url = soup.select('body p:nth-of-type(2) base')[0].get('href') + # base url + base_url = soup.select('body p:nth-of-type(2) base')[0].get('href') #collect name - list_name = soup.select('body p:nth-of-type(2) title')[0].string - logging.info("Getting " + list_name + " list archive for " + sublist_name) + list_name = soup.select('body p:nth-of-type(2) title')[0].string + logging.info("Getting " + list_name + " list archive for " + sublist_name) - # create (main) directory - # this is where all temp files will be created - d = os.path.join(base_archive_dir, name) - if not os.path.exists(d): - os.makedirs(d) + # create (main) directory + # this is where all temp files will be created + d = os.path.join(base_archive_dir, name) + if not os.path.exists(d): + os.makedirs(d) - threads = [] - lists = soup.select('ul:nth-of-type(2) li') + threads = [] + lists = soup.select('ul:nth-of-type(2) li') - for l in lists: - if l.strong is None: - continue + for l in lists: - name = l.strong.string + if l.strong is None: + continue - if name.lower() == sublist_name.lower(): + name = l.strong.string + print(name + " - " + sublist_name) - threads_url_list = [] - threads_links = l.select('ul li a') - for t in threads_links: - thread_url = urllib.parse.urljoin(base_url, t.get('href')) - 
threads_url_list.append(thread_url) + if name.lower() == sublist_name.lower(): - nbr_threads = str(len(threads_url_list)) - n = 0 + threads_url_list = [] + threads_links = l.select('ul li a') - for u in threads_url_list: - time.sleep(DELAY) - n += 1 - logging.info("## " + str(n) + " / " + nbr_threads + " ##") - try: - threads.append(collect_threads_from_url(u, base_archive_dir=d, mbox=mbox)) - except KeyboardInterrupt: - sys.exit(0) - except: - logging.warning("Error archiving: " + l[1] + "... Continuing.") - ex_t, ex, tb = sys.exc_info() - print(ex_t) - traceback.print_tb(tb) - del tb - continue + for t in threads_links: + thread_url = urllib.parse.urljoin(base_url, t.get('href')) + threads_url_list.append(thread_url) - return threads + nbr_threads = str(len(threads_url_list)) + n = 0 - # for u in threads_url_list[0:10]: - # print "---------------------------------------" - # tt = collect_threads_from_url(u, base_archive_dir, mbox) - # threads.append(tt) + for u in threads_url_list: + time.sleep(DELAY) + n += 1 + logging.info("## " + str(n) + " / " + nbr_threads + " ##") + try: + threads.append(collect_threads_from_url(u, base_archive_dir=d, mbox=mbox)) + except KeyboardInterrupt: + sys.exit(0) + except: + logging.warning("Error archiving: " + l[1] + "... Continuing.") + ex_t, ex, tb = sys.exc_info() + print(ex_t) + traceback.print_tb(tb) + del tb + continue - return None + return threads + + # for u in threads_url_list[0:10]: + # print "---------------------------------------" + # tt = collect_threads_from_url(u, base_archive_dir, mbox) + # threads.append(tt) + + return None def collect_threads_from_url(url, base_archive_dir, mbox=False): - response = urllib.request.urlopen(url) - html = response.read() - soup = BeautifulSoup(html, "html5lib") + response = urllib.request.urlopen(url) + html = response.read() + soup = BeautifulSoup(html, "html5lib") - # base url - base_url = url + # base url + base_url = url - # collect name - threads_name = soup.select('p:nth-of-type(1) title')[0].string - threads_name = threads_name.replace(' ', '_') + # collect name - # thread data struct - threads = {'name' : threads_name, 'url' : base_url, 'threads' : []} + e = soup.select('p:nth-of-type(1) title') + print(soup) - logging.info("Collecting Threads of: " + threads_name) + threads_name = soup.select('p:nth-of-type(1) title')[0].string + threads_name = threads_name.replace(' ', '_') - # check if archive already exists - file_path = os.path.join(base_archive_dir, threads['name'] + ".json") - if os.path.isfile(file_path): - logging.info("archive already exists. loading from file " + file_path) - with open(file_path, 'r') as fpin: - threads = json.load(fpin) - else: - lists = soup.select('ul:nth-of-type(1) > li') + # thread data struct + threads = {'name' : threads_name, 'url' : base_url, 'threads' : []} - nbr_threads = str(len(lists)) - n = 0 + logging.info("Collecting Threads of: " + threads_name) - for l in lists: - n += 1 - logging.info("> " + str(n) + " / " + nbr_threads) + # check if archive already exists + file_path = os.path.join(base_archive_dir, threads['name'] + ".json") + if os.path.isfile(file_path): + logging.info("archive already exists. 
loading from file " + file_path) + with open(file_path, 'r') as fpin: + threads = json.load(fpin) + else: + lists = soup.select('ul:nth-of-type(1) > li') - try: - thread = archive_thread(l, base_url, None) - threads['threads'].append(thread) - except: - ex_type, ex, tb = sys.exc_info() - traceback.print_tb(tb) - del tb - continue + nbr_threads = str(len(lists)) + n = 0 - time.sleep(DELAY) + for l in lists: + n += 1 + logging.info("> " + str(n) + " / " + nbr_threads) - # write - logging.info("writing archive to file " + file_path) + try: + thread = archive_thread(l, base_url, None) + threads['threads'].append(thread) + except: + ex_type, ex, tb = sys.exc_info() + traceback.print_tb(tb) + del tb + continue - with open(file_path, 'w') as fp: - json.dump(threads, fp, indent=4) + time.sleep(DELAY) - logging.info("done. ") + # write + logging.info("writing archive to file " + file_path) - return threads + with open(file_path, 'w') as fp: + json.dump(threads, fp, indent=4) - + logging.info("done. ") + + return threads + + def archive_thread(li, base_url, parent_thread_data): @@ -158,57 +165,57 @@ def archive_thread(li, base_url, parent_thread_data): def collect_message(url, message): - response = urllib.request.urlopen(url) - html = response.read().decode(encoding="utf-8") - # html = response.read() - soup = BeautifulSoup(html, "html5lib") + response = urllib.request.urlopen(url) + html = response.read().decode(encoding="utf-8") + # html = response.read() + soup = BeautifulSoup(html, "html5lib") - #note: this should follow an RFC header standard -- MHonArc has header info in the 1th
+	#note: this should follow an RFC header standard -- MHonArc has header info in the 1th
 
-    message_labels = ('to', 'subject', 'from', 'date', 'message-id', 'content-type')    
+	message_labels = ('to', 'subject', 'from', 'date', 'message-id', 'content-type')    
 
-    # mhonarc xcomments
-    # ref: http://www.schlaubert.de/MHonArc/doc/resources/printxcomments.html
-    message['subject'] = parse_xcomment(soup, "X-Subject")
-    message['date'] = parse_xcomment(soup, "X-Date")
-    message['from'] = parse_xcomment(soup, "X-From-R13") #useless...
-    message['message-id'] = parse_xcomment(soup, 'X-Message-Id')
-    message['content-type'] = parse_xcomment(soup, 'X-Content-Type')
+	# mhonarc xcomments
+	# ref: http://www.schlaubert.de/MHonArc/doc/resources/printxcomments.html
+	message['subject'] = parse_xcomment(soup, "X-Subject")
+	message['date'] = parse_xcomment(soup, "X-Date")
+	message['from'] = parse_xcomment(soup, "X-From-R13") #useless...
+	message['message-id'] = parse_xcomment(soup, 'X-Message-Id')
+	message['content-type'] = parse_xcomment(soup, 'X-Content-Type')
 
-    # parse what is displayed on the page
+	# parse what is displayed on the page
 
-    info = soup.select('ul:nth-of-type(1) > li')
+	info = soup.select('ul:nth-of-type(1) > li')
 
-    for i in info:
-        if i.em == None:
-            continue
-        field = i.em.string
-        if field.lower() in message_labels:
-        	message[field.lower()] = i.text.strip(field + ": ")
+	for i in info:
+		if i.em == None:
+			continue
+		field = i.em.string
+		if field.lower() in message_labels:
+			message[field.lower()] = i.text.strip(field + ": ")
 
-    ## reformat from -- [author_name, email_addr]
+	## reformat from -- [author_name, email_addr]
 
-    # from_addr = email.utils.parseaddr(message['from'])
-    # message['author_name'] = from_addr[0]
-    # message['from'] = from_addr[1]
+	# from_addr = email.utils.parseaddr(message['from'])
+	# message['author_name'] = from_addr[0]
+	# message['from'] = from_addr[1]
 
-    ## -- content --
-    # test
-    # c1 = soup.select('pre:nth-of-type(1)')
-    # if len(c1) > 0:
-    #     message['content'] = c1[0].text
-    # else:
-    #     message['content'] = soup.select('pre:nth-of-type(2)')[0].text
+	## -- content --
+	# test
+	# c1 = soup.select('pre:nth-of-type(1)')
+	# if len(c1) > 0:
+	#     message['content'] = c1[0].text
+	# else:
+	#     message['content'] = soup.select('pre:nth-of-type(2)')[0].text
 
-    message['content'] = soup.select('pre:nth-of-type(2)')[0].text
+	message['content'] = soup.select('pre:nth-of-type(2)')[0].text
 
 # mhonarc xcomments
 # ref: http://www.schlaubert.de/MHonArc/doc/resources/printxcomments.html
 def parse_xcomment(soup, xcom):
-    com = soup.find(text=re.compile(xcom))
-    if com is not None:
-        return com.strip('').strip(xcom + ":").strip()
-    return com
+	com = soup.find(text=re.compile(xcom))
+	if com is not None:
+		return com.strip('').strip(xcom + ":").strip()
+	return com
 
 def test_xcomment(soup):
-    return soup.find(text=re.compile('X-Message-Id')) is not None
+	return soup.find(text=re.compile('X-Message-Id')) is not None
diff --git a/www/archives.py b/www/archives.py
deleted file mode 100644
index 7402376..0000000
--- a/www/archives.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import logging, os, json
-import search.archive
-
-class Singleton(type):
-    _instances = {}
-    def __call__(cls, *args, **kwargs):
-        if cls not in cls._instances:
-            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
-            logging.info('**** new Singleton instance')
-        return cls._instances[cls]
-
-class Archives(metaclass=Singleton):
-
-	def __init__(self, archives_dir=None):
-
-		if archives_dir==None:
-			from www import config
-			self.archives_dir = config.ARCHIVES_PATH
-		else:
-			self.archives_dir = archives_dir
-
-		self.data = {}
-		self.loaded = False
-
-		logging.info('loading archives...')
-
-		self.load()
-
-		logging.info('done.')
-
-	def load(self):
-
-		if self.loaded:
-			return
-
-		if not os.path.isdir(self.archives_dir):
-			logging.error("Archives:: the path - " + self.archives_dir + " - is not a valid directory. Aborting.")
-			logging.error(" -- current cwd is: " + os.getcwd())
-
-			return
-
-		arch = [d for d in os.listdir(self.archives_dir) if os.path.isdir(os.path.join(self.archives_dir, d))]
-
-		self.data = {}
-		for a in arch:
-
-			logging.info("loading " + a)
-
-			# archive_path = os.path.join(self.archives_dir, a)
-			self.data[a] = self.load_archive(self.archives_dir, a)
-
-			logging.info("done.")
-
-		self.loaded = True
-		
-
-	def load_archive(self, archive_dir, archive_name):
-
-		if not os.path.isdir(archive_dir):
-			logging.error("Archives:: the path - " + archive_dir + " - is not a valid directory. Aborting.")
-			return
-
-		archive = search.archive.Archive(archive_dir)
-		archive.load(archive_name)
-		return archive
-
-		# # -- shoudl use Archive in searh module here....
-
-		# files = [f for f in os.listdir(archive_dir) if f.endswith('.json')]
-
-		# arch = {}
-		# for f in files:
-		# 	file_path = os.path.join(archive_dir, f)
-		# 	with open(file_path) as fdata:
-		# 		arch[f.replace('.json', '')] = json.load(fdata)
-
-		# return arch	
-
-
diff --git a/www/routes.py b/www/routes.py
index f2b33be..952e80f 100644
--- a/www/routes.py
+++ b/www/routes.py
@@ -11,6 +11,11 @@ import logging
 def index():
 	return render_template("index.html")
 
+@app.route('/favicon.ico')
+def favicon():
+    return send_from_directory(os.path.join(app.root_path, 'static'),
+                               'favicon.ico', mimetype='image/vnd.microsoft.icon')	
+
 @app.route('/search')
 def searh():
 	
diff --git a/www/static/CYBERPLA.GIF b/www/static/CYBERPLA.GIF
new file mode 100644
index 0000000000000000000000000000000000000000..f6e0f058f8e0156549332e4ff26ed92eedd7b56a
GIT binary patch
literal 527
zcmV+q0`UDuNk%w1VO9W80Pp|+|Ns90001HR1OWg5001li0001108jux0{?`MsmtvT
zqfDXNi?hzNKkXe`0C6FMl~*%X;rhZS(AKdlrG~zXG>NFSJTjGq!Z|vG#hjAZyfsIk
z=L{KLTA|zPi3#p{$Ic=7yr80Ypfy&^OAE`mS-cv~Z$x5Nu?^Ix_tmGsqXR_4miVKW
zg}=0;j-dWxDF_eYWb;ExLE7ex`=0Hd)v|Z
z#yBk7OB{=gB8;2+tEv3Un#Tvc8kMwpeVP2a+S7Bi$tvZ|2HvRnQydn{DdQ5DzBO%K
z9Sx~X2+!QRuOF-vH$(RF0W?xj+M0gHbUAZ42@^hp6H)mJxNFusN9Gzkdi)47q{vy*
zMq;eTP~^a6HdLw^^O9u7g(+!9M0oSwL5L3}&MYRXA2248a-Ia3lc+gLMrpB(Su`O*
zXACcH-1!u1Gn7A_cDu?|sim*U!p7{`ENewtbKEV(r7kJ8a#15GA#qn5Ikz~gsY8d2
zBj1MSyv8L~Z?M9`i2}uA#P#Ic$9D_s6if%~wZC}Bp}cz3r&d5UpOS@nDD_RW+t%W=
z+wKihsiGmbl!%%(P%^9y&elEE^(*1Sa%(MnIytG*+dKjPXH7TrQc#^Ym1#A2SCec@
Ra)-YCyZG_s4G=&906VM5|E>T4

literal 0
HcmV?d00001

diff --git a/www/static/cover.gif b/www/static/cover.gif
new file mode 100644
index 0000000000000000000000000000000000000000..dab25398e214297410f84e7de90c98c8171e360d
GIT binary patch
literal 4789
zcmV;m5=!kyNk%w1VJrbD0kZ%A|Ns9O7#KoALStiNdwYAWt*yPiz31oW7ytlD003hE
z0Gt2-y#N5-0093000000EC2ui04xD10YC)*5XecZy*TU5yZ>M)j$~<`XsWI(B?Q46
z!?GMac&_h!@Bg`(uttj=TF9hw$7JM
z?6_MFpWELEv|cwW3yxxef*l7@dshsJii?R3j*pO$l9QB`mY0@mjEs4QK3ji-qAi6`
zo;#b3n5(RKbMfNHNQo|*%wbHy%b6g1
zv}Ev-AcKqWs6her1)n}N2zs`{xq}?Jn>nW}n7Tlz)kg`a=xUgY8JT29&5>W(Fs)_&lq&oPp
z<;#U5Q~Eh|F_p$0!U9|18S{e#epv-x&04T&!kQ%$EDfZ92fi$+Vp?DYLDg^yEGn%0
z8c1x};yAmu0(!!M8(ESnmP(11z|zZcB;XB1x+LyC1jbshws=mnsgVbx{lj*K=V!b}
zGMIpsXWu-)|2S?*|Bm{u5N96$BI?m~MROBDhlPBOFz^F|Sm_|1BkV=@3P2EgG~kB(
z75GYpJs=1Gd=^R)oDVitB8_1gPU4>qzkOHUhUSq&UV0u@0wNFYVL;-7m7M59hmgE@
z3Xv-gsiJ^1f<$DFIv#kNf>*TYz>W`e<3N(>IMM)?JRp!jY(W7L<0VWsiO!QyM#!89
zKUV1um|==}Q8R9AsR(&=?r>&LY3iU>pgFMF<|=(Y(BlZWodFI5=Ah%-p^l`9W18xa
zGnaC$b?PaOz$7Z-JzK40MPQV+shp*kkVg)ouNivDs#83~6%t@SD9^^Xle)rGh912yC7T@;w!SNo1*JZQ5x$mGmM+1SXk^z`{{BmXHrnXH>!0*8
zbuhw@=*w=q;W{ktE{7zt#-Sf^gz+S8Fp%g;LwW4-dmyhF%`YcQHnT5jM$z%hJd4mS
z#N_6b%Fwco^bXOAtX%8Q9QW+>zuItNs+v?gBy}iHXRUP!QC}@})em^>_0C(DZT8t{
zr>*wdP82bO+eExAcieQtKsMWW=RJYgrhr`-C}Zm_I1@I&0E6F%*JAkM4G;cwDO;FP
zoEns;Y`Gd9UamR+Z6M>^6(2HKNhGeP-XJvtqt9;K(u-j82)#K
z075A+l_}P}gpt6gI8b;Aql&0-C9!C!D}RX#p?0*DtxXk>F()8^3Uy__*;P=4FjNzp
z5;Xu75}1$km57zDWS2TR;n4^s4xE>7W#lxm`n#8(9?
z?TC)U@KZ8;gA9Tp|W3
zbcOy=Ad*yo5SP%HFrAz+3jmVhCeifCHI4F0QhE;`Q#lDu`XH7;Dhx0w@k&Ehs+NW+
zS{`$m0UHtkfNPXQ1E!G3N4#atc;@L}d&~8keYM
zCdRA;w&Jo#!o1~1`(q{u@_`pUCT
zLZ@KL(a}_y7-UsWVevwqnk``R8kk27HdJIOf+_uS98#l0KGZR#SP4xP_xQ!I-gTgB
z64O_*q-8Oi#zd@8Ri0N|GXx&o6W4LUh-=+a(1eXXF1Dbh6_SVfgwy}`CTs@PQSgalPvK;7|*Us5ZtMSdSm<_EM_(s~&n)V5x6|HJl
z%i7kumI9o84ftM5TPDP|vpq;{ZFdWR+u}AVv*qpJeEVA(1y{H|BW`i+cwFR$LAe9}
zsw+1Ur(5Ti0Hx-UZh??@it1VylEq0QV1qH)<{}rnEFJG(xH|>&vKGBKRj;(z%LDi7
z6}nZ@E@rm9P`w4#Lxd}l
zCJK+j!kBbML&PhY$zox{)$v46Uj|Ss^Z7-gCK(;u~IUeUUTt}#itl>
zjt9)+gUnbxKb8oH`zYiV?l^HMjzy9`9OM{B(Lhv-^19IA;SaRf2P6xzQcT=~Bm3B-
z&f&JC{EatDnAB6E48eZd(2nF;;L
z8KRj4fR+%SVXuLBV
zjHi_MVCEDrx7+l3^%xnPXGqjq2e?DoBfjKDfvl2;sT{~PAE0el4;tL+;&p<=JV_X%
ztSOXKhai6-?2G{c+BaqOq{E?abMhM!z(zN=1w~16q(L>txe%*yfbV5m0oF?nPcH+#
z3SZ*D;u88uy(Ix6ph`QL#?CY($vp`Sb;ul(GWaQ0&SoAI91<^|+$|gb9`YVQ$mIXo
zjL#i0?^C(r+&@?6#`iPyQjYuKTQbO#7Rhr*Kz+JX=RwCEdF*{(*_tGeQek*rAu9#_
z)3YNw&0Uo7OGLcNt^7f1-Y&zqTV0n}Uw3z&)bzkJ9g!3b`^8b$@Cx`E={-ouxi<}y
zhVY#4O&U9mWj-atH|P+F#L#x~M9g?EMe$b9{F6Tpr@7mGW#xVbY1(2H%oB*(We0q8
z(pC7&#D3U42D+1Cj>7Bu`q!Rh5fF1ER1Ob+l#TcD@rB`rc@jfRRkYE<)z0n2YxojH
z4%SGbe;bI(m*EpF*q}3-dq`jV{EB@3@H*RZ@*Cgt@UMCB!5>5a)knhqfA)U)HwodV
zOyT!^t56ujK|oFnz)RJ
z7DI%XIL~;AU386zlZ{&lHse)@3b>417!`ErcN^l3qE(K2P>1t(80vUf+2xM>XBoCw
zTkMDh^w|O38GLv$AaswRm5)=fj}v$h{%AG^nRW;%7U8#$S#yl%_=Z8qg9qV)
zs}+p_84FC6kmw+hj(8C1G)j}jji=~ZrZ`?F`CutokRVZ#OGP|wQeYaHH#q@+^;D5P
zkzigGlRBwuW;l)iNRmNmKsD(JBAEg}=?qlhA+}flXi0epH~BJ684P!)15gQNQ@ICK
z`GQv&4kJc(h_iz>X=5PaW2Lm-s@NxPW&(VSZyKWBGIj
zf=LQ;>6aI&mTJ}p!OL8DkrlO
zl?B>;t%;y)1)4R(pbZK<^*D}ja+(%dp}2LRe|Hap7*Q0;p;X|ZtQP`$S%oY~qP1jz
zxLE-ynk99GMSb!^Rs?P%%A%-XZ|k`MFpzURhOImWarzJgkZ^-Eb4MUi%0H8*ogndmxJxcfWn7P_d|LhViE{97IKQ6
zi@KfDN4kavP8({4O5MPuS8YD#a1RCg=or_gz+
zyO(zaM+vJ$9)5Bk^F|~)!l#a4pnn>mvF3TH%7p4i6aIl865@~qNUNu}aZ!qqRZyp2
zx^4}@CjK#dFftR9sv&x(an^dQJ1{E82cU+sO3)e*O;@NrmmlV;aOkQ7>bk8Cu&g-H
zb>BJ-$)_alfFaycq;y$zCwi)Z_e%2GsM^tc68Cg)6d+T#rMC&3)~2r@5>FxlvD7+o
zIstaRc&%5_c(Rt2c{W8cl9kHvA-^QDib%4(nWV{@tB}C2lNxZ};IZMsE$23_QiqH(
zYn&Kcvn~d405Pt#Q5F1(1r0m@4ztv(!Z@^J+M^Rdo+z6dvD%!L@O9o7vk0558M~hj
z7_xe57*Cs(@@lZ%Su-Z9uMFENEFvtE#~+!>wlK$dxO%ijVkWbKw!a1tKZm!6LA8jQ
zuoys|c4%}s!m`-pgcIv+4QjaOiJ^Mvc?Qw2Tbl(>GI>BtxOwZgdz)XS_X9xdb+B=<
z0r9o=Nx55wxH*J>*OX5AV@!^FDIDs$=m)!jiU!l_w~^qudN8TMYImUPO^W%Vj;F6>
z8y=D2Z2zG}XaH@FdydDOy2;DCq2YPLOK&ZQcB!-->{K$+iO*7
zZrsprZ-h+|22u{}nIl-gR~x~VrzQ#98;Se@48+gNUC@
zjG{pN#15dt8Qh_j>%%Ebwptvt^OMBL@*T+{wd?ms%5|$E?lC
zET3hArr(^J&&Ks(yyvyS3I^=AU`5esXd|3A^
zkRgGD`y5&tN-$+LfG4a2rRh2Y&7WT=U)f1p30=fi(7<<`pWg+WI$K<%8Hj*(ozW}AED7wOq86+(hoqLJ^<4sEYqJz(;p4Xun>{5$kRE1(_MKA@7y~=
z%>zUYe0NA{jT3`RMQhldh}fH~
P*pB_!kR9145CH%?-Ae(U

literal 0
HcmV?d00001

diff --git a/www/static/favicon.ico b/www/static/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..4d55fa37b2d05fc43460e0a407f830aa42a70dd7
GIT binary patch
literal 894
zcmbV~O(^7X7{@=8+Ey+~E0o%jwMP_1TxeyzY$%(Pa`2wKmBWxTCsWeK&C4v2wk>Hc
z(`rvNG7%xtGz}&7KRGG!%=7E_A2UbK%?{y`ySXjwP1n{Lu4i6WN#N!cqCC^QZgIxoGvT=)3~p&hyu%7;<-tF@cXt;x36Ti?^u0>yBru-L|6$fauGxBR=d
zwxaIt8W@2w@IoE@RP^H;iMxkLYwMT!`Ce!S2Yx^~+{5M@vXnQlq6^uWp8o#$c&n&u
zVz?rd_y8qZ$OrKY3L=q!tSCKQW&?VSWkp5%q=h8cM~e79B^*cYAn^?UU`IKtI+3()
zC0twt4PWxf$$urRSybHQeSd2!Pe~x)bIGQ-zBUghU?prp6z0@~' + h.subject + ' -- ' + h.author_name + '';
diff --git a/www/static/search.js b/www/static/search.js
--- a/www/static/search.js
+++ b/www/static/search.js
+			let hit = '' + h.subject + ' -- ' + h.author_name + '';
 			hits += hit;
 		});
 		hits += "";
diff --git a/www/templates/index.html b/www/templates/index.html
index 646c4a0..155ed56 100644
--- a/www/templates/index.html
+++ b/www/templates/index.html
@@ -1,6 +1,11 @@
     ---> SEARCH <---
+    Times of Nettime
\ No newline at end of file
diff --git a/www/templates/search.html b/www/templates/search.html
index c5b5715..ee42174 100644
--- a/www/templates/search.html
+++ b/www/templates/search.html
@@ -1,5 +1,6 @@
     Times of Nettime
@@ -21,8 +22,21 @@
 {% endfor %}
+    Loading...
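
A minimal usage sketch of the reworked crawl entry point, mirroring the call archive.py makes after this patch. The archive index URL and the archive directory below are illustrative placeholders, not values taken from the repository.

    # Sketch only -- mirrors lists.crawl.crawl() as invoked from archive.py in this patch.
    # The URL is a hypothetical placeholder for a MHonArc archive index page.
    import lists.crawl

    url = "https://www.nettime.org/archives.php"   # placeholder archive index URL
    name = "nettime-l"                             # list name; for nettime it doubles as the sublist label

    # sublist_name defaults to name when omitted (see lists/crawl.py); it is forwarded to
    # mhonarc_nettime.collect_from_url(), which matches it against the sublist heading on the index page.
    lists.crawl.crawl(url=url, name=name, sublist_name=name, archive_dir="archives")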