2016-11-19 19:34:41 +00:00
|
|
|
from __future__ import print_function
|
|
|
|
|
2016-01-14 20:41:14 +00:00
|
|
|
import click
|
|
|
|
import json
|
2016-03-16 14:23:47 +00:00
|
|
|
import os
|
2016-01-14 20:41:14 +00:00
|
|
|
import logging
|
|
|
|
import time
|
|
|
|
import sqlalchemy.types
|
|
|
|
import threading
|
|
|
|
import sqlite3
|
2016-11-19 19:16:56 +00:00
|
|
|
from tqdm import tqdm
|
2016-01-14 20:41:14 +00:00
|
|
|
|
|
|
|
from sqlalchemy import exists
|
|
|
|
|
|
|
|
from bitter import utils, models, crawlers
|
2016-09-28 03:06:12 +00:00
|
|
|
from bitter import config as bconf
|
2016-01-14 20:41:14 +00:00
|
|
|
from bitter.models import make_session, User, ExtractorEntry, Following
|
2016-09-15 11:56:17 +00:00
|
|
|
|
|
|
|
import sys
|
|
|
|
if sys.version_info <= (3, 0):
|
|
|
|
from contextlib2 import ExitStack
|
|
|
|
else:
|
|
|
|
from contextlib import ExitStack
|
|
|
|
|
|
|
|
|
2016-01-14 20:41:14 +00:00
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
@click.group()
@click.option("--verbose", is_flag=True)
@click.option("--logging_level", required=False, default='WARN')
@click.option("--config", required=False)
@click.option('-c', '--credentials', show_default=True, default='~/.bitter-credentials.json')
@click.pass_context
def main(ctx, verbose, logging_level, config, credentials):
    # Entry point of the bitter CLI: configure logging, stash the global
    # options on the click context, and make sure a credentials file exists.
    logging.basicConfig(level=getattr(logging, logging_level))
    ctx.obj = {'VERBOSE': verbose, 'CONFIG': config}
    bconf.CREDENTIALS = credentials
    utils.create_credentials(credentials)
|
2016-01-14 20:41:14 +00:00
|
|
|
|
|
|
|
@main.group()
@click.pass_context
def tweet(ctx):
    # Container group for tweet-related subcommands; no shared setup needed.
    pass
|
|
|
|
|
|
|
|
@tweet.command('get')
@click.option('-w', '--write', is_flag=True, default=False)
@click.option('-f', '--folder', default="tweets")
@click.option('-u', '--update', help="Update the file even if the tweet exists", is_flag=True, default=False)
@click.argument('tweetid')
def get_tweet(tweetid, write, folder, update):
    # Fetch one tweet by id, optionally persisting it under *folder*.
    api_queue = crawlers.TwitterQueue.from_credentials(bconf.CREDENTIALS)
    utils.download_tweet(api_queue, tweetid, write, folder, update)
|
2016-01-14 20:41:14 +00:00
|
|
|
|
2016-11-23 09:48:35 +00:00
|
|
|
@tweet.command('get_all')
@click.argument('tweetsfile')
@click.option('-f', '--folder', default="tweets")
@click.pass_context
def get_tweets(ctx, tweetsfile, folder):
    """Download every tweet listed (one id per line) in TWEETSFILE.

    click Arguments take exactly one parameter declaration and no help
    string; the original extra positional raised at import time.
    """
    wq = crawlers.TwitterQueue.from_credentials(bconf.CREDENTIALS)
    utils.download_tweets(wq, tweetsfile, folder)
|
2016-01-14 20:41:14 +00:00
|
|
|
|
|
|
|
@tweet.command('search')
@click.argument('query')
@click.pass_context
def search(ctx, query):
    # Run a tweet search through the crawler queue and pretty-print the result.
    wq = crawlers.TwitterQueue.from_credentials(bconf.CREDENTIALS)
    result = utils.search_tweet(wq, query)
    print(json.dumps(result, indent=2))
|
|
|
|
|
2016-03-16 14:23:47 +00:00
|
|
|
@tweet.command('timeline')
@click.argument('user')
@click.pass_context
def timeline(ctx, user):
    # Fetch a user's timeline and pretty-print it as JSON.
    wq = crawlers.TwitterQueue.from_credentials(bconf.CREDENTIALS)
    result = utils.user_timeline(wq, user)
    print(json.dumps(result, indent=2))
|
|
|
|
|
2016-01-14 20:41:14 +00:00
|
|
|
@main.group()
@click.pass_context
def users(ctx):
    # Container group for user-related subcommands; no shared setup needed.
    pass
|
|
|
|
|
|
|
|
@users.command('list')
@click.option('--db', required=True, help='Database of users.')
@click.pass_context
def list_users(ctx, db):
    # Dump every stored user: screen name first, then every mapped attribute.
    session = make_session('sqlite:///{}'.format(db))
    for user in session.query(User):
        print(user.screen_name)
        for attr in user.__dict__:
            print('\t{}: {}'.format(attr, getattr(user, attr)))
|
|
|
|
|
2016-11-23 09:48:35 +00:00
|
|
|
@users.command('get')
@click.argument('user')
@click.option('-w', '--write', is_flag=True, default=False)
@click.option('-f', '--folder', default="users")
@click.option('-u', '--update', help="Update the file even if the user exists", is_flag=True, default=False)
def get_user(user, write, folder, update):
    """Fetch a user profile and print it, or store it as JSON under *folder*.

    Fixes: local variable `file` shadowed the builtin, the fetch/serialize
    code was duplicated in both branches, and `exists(...) and isfile(...)`
    was redundant (`isfile` already implies existence).
    """
    wq = crawlers.TwitterQueue.from_credentials(bconf.CREDENTIALS)
    if not write:
        print(json.dumps(utils.get_user(wq, user), indent=2))
        return
    if not os.path.exists(folder):
        os.makedirs(folder)
    path = os.path.join(folder, '%s.json' % user)
    # Do not re-download an existing profile unless --update was given.
    if not update and os.path.isfile(path):
        print('User exists: %s' % user)
        return
    with open(path, 'w') as f:
        print(json.dumps(utils.get_user(wq, user), indent=2), file=f)
|
2016-01-14 20:41:14 +00:00
|
|
|
|
2016-11-23 09:48:35 +00:00
|
|
|
@users.command('get_all')
@click.argument('usersfile')
@click.option('-f', '--folder', default="users")
@click.pass_context
def get_users(ctx, usersfile, folder):
    """Fetch every user listed (one id/screen name per line) in USERSFILE.

    click Arguments take exactly one parameter declaration and no help
    string; the original extra positional raised at import time.
    """
    with open(usersfile) as f:
        for line in f:
            ctx.invoke(get_user, folder=folder, user=line.strip(), write=True)
|
|
|
|
|
|
|
|
@users.command('crawl')
@click.option('--db', required=True, help='Database to save all users.')
@click.option('--skip', required=False, default=0, help='Skip N lines from the file.')
@click.option('--until', required=False, type=str, default=None, help='Skip all lines until ID.')
@click.option('--threads', required=False, type=int, default=20, help='Number of crawling threads.')
@click.argument('usersfile')
@click.pass_context
def crawl_users(ctx, usersfile, skip, until, threads, db):
    """Crawl the profiles of the users listed (one id per line) in USERSFILE.

    Fixes over the previous revision:
    - the `queue` module was never imported (NameError at runtime);
    - `--threads` was declared `type=str`, breaking `range(threads)`;
    - `--until` was `type=str` with `default=0`, which click coerced to the
      truthy string "0" and therefore skipped the whole input file;
    - `with db_lock:` failed on the non-sqlite branch, where db_lock was a
      function instead of a context manager;
    - an unused `sqlite3.connect(db)` handle was opened and leaked;
    - `click.argument` received an unsupported help string;
    - `Thread.isAlive()` replaced by the non-deprecated `is_alive()`.
    """
    # The stdlib queue module is named Queue on Python 2.
    try:
        import queue
    except ImportError:  # Python 2
        import Queue as queue

    global dburl, ids_queue, skipped, enqueued, collected, lastid, db_lock

    if '://' not in db:
        dburl = 'sqlite:///{}'.format(db)
        # sqlite cannot handle concurrent writers: serialize commits.
        db_lock = threading.Lock()
    else:
        dburl = db
        # Real database servers handle concurrent writers; use a no-op
        # context manager so `with db_lock:` works on both branches.
        db_lock = ExitStack()

    wq = crawlers.TwitterQueue.from_credentials(bconf.CREDENTIALS)
    logger.info('Starting Network crawler with {} threads and {} credentials.'.format(
        threads, len(wq.queue)))

    ids_queue = queue.Queue(1000)
    skipped = skip
    enqueued = 0
    collected = 0
    statslock = threading.Lock()
    lastid = -1

    def fill_queue():
        # Producer: read ids from the file, drop ids already stored, enqueue
        # the rest, then send one sentinel per consumer thread.
        global enqueued, skipped
        with open(usersfile, 'r') as f:
            engine = sqlalchemy.create_engine(dburl)

            def user_filter(uid):
                global skipped
                # NOTE(review): string-built SQL; uid comes from a local file,
                # but a parameterized query would still be safer.
                keep = not list(engine.execute(
                    'SELECT 1 from users where id=\'%s\'' % uid))
                if not keep:
                    skipped += 1
                return keep

            for _ in range(skip):
                next(f)
            ilist = map(lambda x: x.strip(), f)
            logger.info('Skipping until {}'.format(until))
            if not skip and until:
                for uid in ilist:
                    if uid == until:
                        break
                    else:
                        skipped += 1
            ilist = filter(user_filter, ilist)
            for uid in ilist:
                ids_queue.put(uid)
                enqueued += 1
        # One sentinel per consumer so every worker terminates.
        for _ in range(threads):
            ids_queue.put(None)

    def consume_queue():
        # Consumer: pull ids until the None sentinel, fetch profiles in bulk
        # and store them, committing in batches of 100.
        global collected, lastid
        local_collected = 0
        logging.debug('Consuming!')
        session = make_session(dburl)
        q_iter = iter(ids_queue.get, None)
        for user in utils.get_users(wq, q_iter):
            session.add(User(**user))
            local_collected += 1
            with statslock:
                collected += 1
                lastid = user['id']
            if local_collected % 100 == 0:
                with db_lock:
                    session.commit()
        session.commit()
        logger.debug('Done consuming')

    filler = threading.Thread(target=fill_queue)
    filler.start()
    consumers = [threading.Thread(target=consume_queue) for _ in range(threads)]
    logging.debug('Starting consumers')
    for c in consumers:
        c.start()
    logging.debug('Joining filler')
    counter = 0
    speed = 0
    lastcollected = collected
    while True:
        filler.join(1)
        logger.info('########\n'
                    ' Collected: {}\n'
                    ' Speed: ~ {} profiles/s\n'
                    ' Skipped: {}\n'
                    ' Enqueued: {}\n'
                    ' Queue size: {}\n'
                    ' Last ID: {}'.format(collected, speed, skipped, enqueued,
                                          ids_queue.qsize(), lastid))
        if not filler.is_alive():
            if all(not c.is_alive() for c in consumers):
                break
        else:
            time.sleep(1)
        counter += 1
        if counter % 10 == 0:
            speed = (collected - lastcollected) / 10
            with statslock:
                lastcollected = collected

    logger.info('Done!')
|
|
|
|
|
|
|
|
@main.group('api')
def api():
    # Container group for raw Twitter API utilities (e.g. rate limits).
    pass
|
|
|
|
|
|
|
|
|
|
|
|
@main.group('extractor')
@click.pass_context
@click.option('--db', required=True, help='Database of users.')
def extractor(ctx, db):
    # Normalize bare file paths into sqlite URIs, then open the session that
    # every extractor subcommand shares through the click context.
    uri = db if '://' in db else 'sqlite:///{}'.format(db)
    ctx.obj['DBURI'] = uri
    ctx.obj['SESSION'] = make_session(uri)
|
|
|
|
|
|
|
|
|
|
|
|
@extractor.command('status')
@click.option('--with_followers', is_flag=True, default=False)
@click.option('--with_not_pending', is_flag=True, default=False)
@click.pass_context
def status_extractor(ctx, with_followers, with_not_pending):
    """Print extractor entries (pending only by default) and follower stats.

    The attribute-dump loop was duplicated verbatim for entries and
    followers; it is now a single local helper.
    """
    def dump(obj):
        # Print an object's id followed by all of its instance attributes.
        print(obj.id)
        for attr in obj.__dict__:
            print('\t{}: {}'.format(attr, getattr(obj, attr)))

    session = ctx.obj['SESSION']
    entries = session.query(ExtractorEntry)
    if not with_not_pending:
        entries = entries.filter(ExtractorEntry.pending == True)
    for entry in entries:
        dump(entry)
    followers = session.query(Following)
    print('Followers count: {}'.format(followers.count()))
    if with_followers:
        for follower in followers:
            dump(follower)
|
|
|
|
|
2016-03-16 14:23:47 +00:00
|
|
|
@extractor.command('network')
@click.option('--as_json', is_flag=True, default=False)
@click.pass_context
def network_extractor(ctx, as_json):
    """Print the stored follower network, as edge lines or as a JSON list.

    The redundant local `import json` (json is imported at module level)
    and the C-style `if(...)` parentheses were removed.
    """
    session = ctx.obj['SESSION']
    follower_map = []
    for edge in session.query(Following):
        if as_json:
            follower_map.append({'source_id': edge.follower,
                                 'target_id': edge.isfollowed,
                                 'following': True})
        else:
            print('{} -> {}'.format(edge.follower, edge.isfollowed))
    if as_json:
        print(json.dumps(follower_map, indent=4))
|
|
|
|
|
|
|
|
|
|
|
|
@extractor.command('users')
@click.pass_context
def users_extractor(ctx):
    """Print every stored user as pretty-printed JSON.

    The redundant local `import json` (already imported at module level)
    and the commented-out code were removed.
    """
    session = ctx.obj['SESSION']
    for user in session.query(User):
        print(json.dumps(user.as_dict(), indent=4))
|
|
|
|
|
2016-01-14 20:41:14 +00:00
|
|
|
|
|
|
|
@extractor.command()
@click.option('--recursive', is_flag=True, help='Get following/follower/info recursively.', default=False)
@click.option('-u', '--user', default=None)
@click.option('-n', '--name', show_default=True, default='extractor')
@click.option('-i', '--initfile', required=False, default=None, help='List of users to load')
@click.pass_context
def extract(ctx, recursive, user, name, initfile):
    """Run the named extractor, optionally recursively, seeded from INITFILE."""
    # Debug leftover `print(locals())` downgraded to a debug log entry.
    logger.debug('Extract called with: %s', locals())
    wq = crawlers.TwitterQueue.from_credentials(bconf.CREDENTIALS)
    utils.extract(wq,
                  recursive=recursive,
                  user=user,
                  dburi=ctx.obj['DBURI'],
                  initfile=initfile,
                  extractor_name=name)
|
|
|
|
|
|
|
|
@extractor.command('reset')
@click.pass_context
def reset_extractor(ctx):
    """Mark every pending extractor entry as not pending.

    Fixes: the crawler queue was built but never used, and the bulk UPDATE
    was never committed, so the change was rolled back when the session
    was discarded.
    """
    session = make_session(ctx.obj['DBURI'])
    session.query(ExtractorEntry).filter(
        ExtractorEntry.pending == True).update({'pending': False})
    # Persist the bulk update; without a commit it is rolled back.
    session.commit()
|
|
|
|
|
|
|
|
@api.command('limits')
@click.argument('url', required=False)
@click.pass_context
def get_limits(ctx, url):
    # Show the rate-limit status of every credential; when a URL is given,
    # narrow the report to that endpoint (or its whole resource category).
    wq = crawlers.TwitterQueue.from_credentials(bconf.CREDENTIALS)
    for worker in wq.queue:
        resp = worker.client.application.rate_limit_status()
        print('#' * 20)
        print(worker.name)
        if not url:
            print(json.dumps(resp, indent=2))
            continue
        limit = 'NOT FOUND'
        print('URL is: {}'.format(url))
        # assumes url is of the form '/<category>/...' — TODO confirm
        cat = url.split('/')[1]
        if cat in resp['resources']:
            limit = resp['resources'][cat].get(url, None) or resp['resources'][cat]
        else:
            print('Cat {} not found'.format(cat))
        print('{}: {}'.format(url, limit))
|
|
|
|
|
2016-09-14 17:53:56 +00:00
|
|
|
@main.command('server')
@click.argument('CONSUMER_KEY', required=True)
@click.argument('CONSUMER_SECRET', required=True)
@click.pass_context
def run_server(ctx, consumer_key, consumer_secret):
    # Expose the OAuth app credentials through the shared config module so
    # the web server can read them, then start the bundled app on all
    # interfaces. Imported lazily so the CLI works without web dependencies.
    bconf.CONSUMER_KEY = consumer_key
    bconf.CONSUMER_SECRET = consumer_secret
    from .webserver import app
    app.run(host='0.0.0.0')
|
2016-09-14 17:53:56 +00:00
|
|
|
|
2016-11-18 15:08:29 +00:00
|
|
|
@main.group()
@click.pass_context
def stream(ctx):
    # Container group for streaming-API subcommands; no shared setup needed.
    pass
|
|
|
|
|
|
|
|
@stream.command('get')
@click.option('-l', '--locations', default=None)
@click.option('-t', '--track', default=None)
@click.option('-f', '--file', help='File to store the stream of tweets')
@click.option('-p', '--politelyretry', help='Politely retry after a hangup/connection error', is_flag=True, default=True)
@click.pass_context
def get_stream(ctx, locations, track, file, politelyretry):
    """Stream tweets (sample, or filtered by location/track) to a file or stdout.

    Fixes: the hangup reference time was never refreshed, so after the first
    minute rapid consecutive hangups could loop forever without raising;
    the output file also leaked when the stream raised mid-iteration.
    """
    wq = crawlers.StreamQueue.from_credentials(bconf.CREDENTIALS, 1)

    query_args = {}
    if locations:
        query_args['locations'] = locations
    if track:
        query_args['track'] = track

    outfile = sys.stdout if not file else open(file, 'a')

    def insist():
        # Reconnect after hangups, but give up if two hangups happen within
        # a minute of each other.
        lasthangup = time.time()
        while True:
            if query_args:
                iterator = wq.statuses.filter(**query_args)
            else:
                iterator = wq.statuses.sample()
            for status in iterator:
                yield status
            if not politelyretry:
                return
            thishangup = time.time()
            if thishangup - lasthangup < 60:
                raise Exception('Too many hangups in a row.')
            lasthangup = thishangup
            time.sleep(3)

    try:
        for tweet in tqdm(insist()):
            print(json.dumps(tweet), file=outfile)
    finally:
        # Always release the file handle, even if the stream dies.
        if outfile is not sys.stdout:
            outfile.close()
|
2016-11-18 15:08:29 +00:00
|
|
|
|
2016-11-19 19:16:56 +00:00
|
|
|
@stream.command('read')
@click.option('-f', '--file', help='File to read the stream of tweets from', required=True)
@click.option('-t', '--tail', is_flag=True, help='Keep reading from the file, like tail', type=bool, default=False)
@click.pass_context
def read_stream(ctx, file, tail):
    # Pretty-print each stored tweet; fall back to the raw payload for
    # entries that are not regular tweets (e.g. delete/limit notices).
    for tweet in utils.read_file(file, tail=tail):
        try:
            line = u'{timestamp_ms}- @{screen_name}: {text}'.format(
                timestamp_ms=tweet['timestamp_ms'],
                screen_name=tweet['user']['screen_name'],
                text=tweet['text'])
            print(line)
        except (KeyError, TypeError):
            print('Raw tweet: {}'.format(tweet))
|
2016-11-19 19:16:56 +00:00
|
|
|
|
|
|
|
@stream.command('tags')
@click.option('-f', '--file', help='File to read the stream of tweets from', required=True)
@click.argument('limit', required=False, default=None, type=int)
@click.pass_context
def tags_stream(ctx, file, limit):
    """Print the most common hashtags in a stored stream, one per line.

    Counter.most_common yields (element, count) pairs; the original loop
    unpacked them as (count, tag), mislabelling both variables. The printed
    output (hashtag first, then its count) is unchanged.
    """
    counts = utils.get_hashtags(utils.read_file(file))
    for tag, count in counts.most_common(limit):
        print(u'{} - {}'.format(tag, count))
|
2016-11-19 19:16:56 +00:00
|
|
|
|
2016-09-14 17:53:56 +00:00
|
|
|
|
2016-01-14 20:41:14 +00:00
|
|
|
if __name__ == '__main__':
    # Allow running the CLI directly as a script as well as via the
    # installed console entry point.
    main()
|