# Copyright (C) 2020 Fintic, finticofficial@gmail.com
#
# This file is part of Fintic project, developed by Neythen Treloar and Justin Dunn
#
# This code can not be copied and/or distributed without the express
# permission of Fintic

import random
import pickle
import finnhub
import time
import csv
import pytz
from datetime import datetime, timedelta
import subprocess
import json
import urllib.request
import datetime as dt
import sys, os, base64, hashlib, hmac, select
import requests
from pycoingecko import CoinGeckoAPI
from newsapi import NewsApiClient
import traceback
from geopy import geocoders
from multiprocessing import Process

try:
    # On startup, wait, then flag these features so they refresh on the next cycle.
    time.sleep(80)
    f = open('csv/last_updates.json', 'r')
    last_updates = json.load(f)
    f.close()
    last_updates['stocks']['force'] = True
    last_updates['globalstocks']['force'] = True
    last_updates['prepost']['force'] = True
    last_updates['sports_l']['force'] = True
    last_updates['market']['force'] = True
    f = open('csv/last_updates.json', 'w')
    json.dump(last_updates, f)
    f.close()
except:
    pass

try:
    f = open('csv/scheduler.json', 'r')
    schedules = json.load(f)
    f.close()

    shutdown_schedule_hour = schedules['shutdown']['hour']
    shutdown_schedule_minute = schedules['shutdown']['minute']

    reboot_schedule_hour = schedules['reboot']['hour']
    reboot_schedule_minute = schedules['reboot']['minute']

    timezone = schedules['timezone']
    shutdown_enabled = schedules['shutdown']['enabled']
    reboot_enabled = schedules['reboot']['enabled']
except:
    # Safe defaults if the scheduler config is missing or malformed.
    shutdown_schedule_hour = "00"
    shutdown_schedule_minute = "00"

    reboot_schedule_hour = "00"
    reboot_schedule_minute = "00"

    timezone = "GMT"
    shutdown_enabled = False
    reboot_enabled = False
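
# Illustrative shape of csv/scheduler.json, inferred from the keys read above
# (values are examples only):
# {"timezone": "GMT",
#  "shutdown": {"hour": "23", "minute": "30", "enabled": true},
#  "reboot": {"hour": "04", "minute": "00", "enabled": false}}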


def getInput(Block=False):
    """Read one character from stdin; non-blocking unless Block is True."""
    if Block or select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], []):
        msg = sys.stdin.read(1)
        #sys.stdin.flush()
    else:
        msg = ''
    return msg


def emptyInfo(symbols, stock_info):
    """Return True if any tracked symbol still has no info (-1 placeholder)."""
    update = False
    for symbol in symbols:
        if stock_info[symbol] == -1:  # stock with no info
            update = True
    return update


def updateUpdate(NY_time):
    """Record the time of the last update (New York time) in csv/last_update.csv."""
    NY_str = NY_time.strftime("%d/%m/%Y %H:%M:%S")
    f = open('csv/last_update.csv', 'w+')
    f.write(NY_str + '\n')
    f.close()


# Superseded by the updateStocks() below; kept for reference.
# def updateStocks(api_key, logf):
#     try:
#         f = open('csv/stocks_settings.json', 'r')
#         all_stocks_settings = json.load(f)
#         f.close()
#         stock_info = all_stocks_settings['symbols']
#         symbols = list(stock_info.keys())
#
#         url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/stocks?symbols='
#
#         for symbol in symbols:
#             url += symbol + ','
#
#         url += '&apiKey=' + api_key
#         response = requests.get(url)
#         data = response.json()
#
#         # stock_info = {}
#         if len(data) > 0:
#             for symbol in symbols:
#                 for stock in data:
#                     if stock['symbol'] == symbol:
#                         stock_info[stock['symbol']] = {'current': stock['price'], 'change': stock['change_since'], 'percent_change': stock['percent']}
#
#         all_stocks_settings['symbols'] = stock_info
#         f = open('csv/stocks_settings.json', 'w+')
#         json.dump(all_stocks_settings, f)
#         f.close()
#
#     except:
#         pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()


def getCookiesnCrumb():
    """Fetch a fresh Yahoo Finance session cookie and API crumb and cache them to disk."""
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
    cookie_url = 'https://finance.yahoo.com'
    crumb_url = 'https://query1.finance.yahoo.com/v1/test/getcrumb'

    session = requests.Session()
    session.get(cookie_url, headers=headers)
    crumb = session.get(crumb_url, headers=headers).content.decode('utf-8')

    with open('session.txt', 'wb') as f:
        pickle.dump(session, f)
    with open('crumb.txt', 'w') as f:
        f.write(crumb)
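
# The cached session.txt (pickled requests.Session) and crumb.txt written above
# are consumed by the updaters below: whenever Yahoo Finance answers with an
# error payload, they reload both files and retry with the crumb as a parameter.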


def human_format(num):
    """Format a number to three significant figures with a K/M/B/T suffix."""
    num = float('{:.3g}'.format(num))
    magnitude = 0
    while abs(num) >= 1000:
        magnitude += 1
        num /= 1000.0
    return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'T'][magnitude])
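
# Worked examples for human_format (illustrative values, not live data):
#   human_format(1234567)    -> '1.23M'  (three significant figures, 'M' suffix)
#   human_format(987)        -> '987'    (no suffix below 1000)
#   human_format(2500000000) -> '2.5B'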


def updateStocks(api_key, logf):

    try:
        try:
            f = open('csv/stocks_settings.json', 'r')
            all_stocks_settings = json.load(f)
            f.close()
        except:
            # Fall back to bundled example settings if the file is unreadable.
all_stocks_settings = {"feature": "Stocks", "speed": "medium", "speed2": "medium", "animation": "down", "percent": False, "point": True, "logos": True, "chart": False, "title": True, "symbols": {"AAPL": {"current": "164.02", "change": "-1.59", "percent_change": "-0.97"}, "MSFT": {"current": "288.29", "change": "-1.32", "percent_change": "-0.46"}, "GOOG": {"current": "2586.74", "change": "-34.01", "percent_change": "-1.31"}, "NFLX": {"current": "380.52", "change": "-7.59", "percent_change": "-1.99"}}, "prepost": False, "lohivol": False, "display_name": False}

        stock_info = all_stocks_settings['symbols']
        symbols = list(stock_info.keys())

        headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}

        # url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/stocks?symbols='
        # Pick one of two quote sources at random: Yahoo Finance v7 or IEX Cloud.
        url1 = 'https://query1.finance.yahoo.com/v7/finance/quote?fields=longName,regularMarketPrice,regularMarketChangePercent,regularMarketChange,regularMarketDayHigh,regularMarketDayLow,regularMarketVolume&region=US&lang=en-US&symbols='
        url2 = 'https://cloud.iexapis.com/v1/stock/market/batch?&types=quote&token=pk_aff870df1a984daa9dd43c71801c1936&symbols='
        url = random.choice([url1, url2])

        for symbol in symbols:
            url += symbol + ','
        # url += '&apiKey=' + api_key

        if 'cloud.iexapis.com' in url:
            response = requests.get(url, headers=headers)
            data = response.json()
            if len(data) > 0:
                for symbol in symbols:
                    try:
                        if 'name' in stock_info[data[symbol]['quote']['symbol']] and stock_info[data[symbol]['quote']['symbol']]['name'] != '':
                            stock_info[data[symbol]['quote']['symbol']] = {'current': str(data[symbol]['quote']['latestPrice']), 'change': str(data[symbol]['quote']['change']), 'percent_change': str(data[symbol]['quote']['changePercent'] * 100), 'day_low': str(data[symbol]['quote']['low']), 'day_high': str(data[symbol]['quote']['high']), 'volume': str(human_format(data[symbol]['quote']['latestVolume'])), 'name': stock_info[data[symbol]['quote']['symbol']]['name']}
                        else:
                            stock_info[data[symbol]['quote']['symbol']] = {'current': str(data[symbol]['quote']['latestPrice']), 'change': str(data[symbol]['quote']['change']), 'percent_change': str(data[symbol]['quote']['changePercent'] * 100), 'day_low': str(data[symbol]['quote']['low']), 'day_high': str(data[symbol]['quote']['high']), 'volume': str(human_format(data[symbol]['quote']['latestVolume'])), 'name': data[symbol]['quote']['companyName'].split(' - ')[0].replace('Corp.', 'Corp').replace('Corporation', 'Corp')}
                    except:
                        pass
            all_stocks_settings['symbols'] = stock_info
            with open('csv/stocks_settings.json', 'w+') as f:
                json.dump(all_stocks_settings, f)

        elif 'query1.finance.yahoo.com/v7' in url:
            # Yahoo lists Berkshire as BRK-A/BRK-B; the settings file uses BRK.A/BRK.B.
            url = url.replace('BRK.A', 'BRK-A').replace('BRK.B', 'BRK-B')
            response = requests.get(url, headers=headers)
            data = response.json()
            if "'error': {'code'" in str(data):
                # Yahoo rejected the plain request: retry via the cached session cookie and crumb.
                while True:
                    try:
                        with open('session.txt', 'rb') as f:
                            session = pickle.load(f)
                        with open('crumb.txt', 'r') as f:
                            crumb = f.read()
                    except:
                        getCookiesnCrumb()
                        with open('session.txt', 'rb') as f:
                            session = pickle.load(f)
                        with open('crumb.txt', 'r') as f:
                            crumb = f.read()
                    params = {'crumb': crumb}

                    data = session.get(url, headers=headers, params=params).json()

                    if "'error': {'code'" not in str(data):
                        break
                    else:
                        getCookiesnCrumb()
                        time.sleep(5)

            # stock_info = {}
            if len(data) > 0:
                for symbol in symbols:
                    try:
                        for stock in data['quoteResponse']['result']:
                            if stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B') == symbol and 'name' in stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')] and stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['name'] != '':
                                stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')] = {'current': str(stock['regularMarketPrice']), 'change': str(stock['regularMarketChange']), 'percent_change': str(stock['regularMarketChangePercent']), 'day_low': str(stock['regularMarketDayLow']), 'day_high': str(stock['regularMarketDayHigh']), 'volume': str(human_format(stock['regularMarketVolume'])), 'name': stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['name']}
                            elif stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B') == symbol and 'name' not in stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]:
                                stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')] = {'current': str(stock['regularMarketPrice']), 'change': str(stock['regularMarketChange']), 'percent_change': str(stock['regularMarketChangePercent']), 'day_low': str(stock['regularMarketDayLow']), 'day_high': str(stock['regularMarketDayHigh']), 'volume': str(human_format(stock['regularMarketVolume'])), 'name': stock['longName'].replace(',', '').replace('Inc.', 'Inc').replace('Corporation', 'Corp').replace('Ltd.', 'Ltd').replace('Limited', 'Ltd')}
                    except:
                        pass
            all_stocks_settings['symbols'] = stock_info
            with open('csv/stocks_settings.json', 'w+') as f:
                json.dump(all_stocks_settings, f)

    except:
        pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()
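
# After a successful run, each entry of stocks_settings.json['symbols'] has the
# shape below (values illustrative):
#   "AAPL": {"current": "164.02", "change": "-1.59", "percent_change": "-0.97",
#            "day_low": "163.50", "day_high": "166.00", "volume": "75M",
#            "name": "Apple Inc"}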


def updateStocksPrePost(api_key, logf):
    try:
        f = open('csv/stocks_settings.json', 'r')
        all_stocks_settings = json.load(f)
        f.close()
        stock_info = all_stocks_settings['symbols']
        symbols = list(stock_info.keys())

        #KEEP THIS JUST IN CASE V7 GOES DOWN prepost_url = 'https://query2.finance.yahoo.com/v6/finance/quote?symbols='
        prepost_url = 'https://query2.finance.yahoo.com/v6/finance/quote?symbols='
        for symbol in symbols:
            prepost_url += symbol + ','

        prepost_url += '&fields=regularMarketPreviousClose,regularMarketPrice,preMarketPrice,preMarketChangePercent,regularMarketChangePercent,regularMarketChange,preMarketChange,postMarketPrice,postMarketChange,postMarketChangePercent&region=US&lang=en-US'

        headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}

        prepost = requests.get(prepost_url.replace('BRK.A', 'BRK-A').replace('BRK.B', 'BRK-B'), headers=headers).json()
        if "'error': {'code'" in str(prepost):
            # Retry through the cached session/crumb, switching to the v7 endpoint.
            while True:
                try:
                    with open('session.txt', 'rb') as f:
                        session = pickle.load(f)
                    with open('crumb.txt', 'r') as f:
                        crumb = f.read()
                except:
                    getCookiesnCrumb()
                    with open('session.txt', 'rb') as f:
                        session = pickle.load(f)
                    with open('crumb.txt', 'r') as f:
                        crumb = f.read()
                params = {'crumb': crumb}

                prepost = session.get(prepost_url.replace('v6', 'v7').replace('BRK.A', 'BRK-A').replace('BRK.B', 'BRK-B'), headers=headers, params=params).json()

                if "'error': {'code'" not in str(prepost):
                    break
                else:
                    getCookiesnCrumb()
                    time.sleep(5)

        prepost_data = prepost['quoteResponse']['result']
        time_now = datetime.now(pytz.timezone('America/New_York')).strftime("%H:%M EST")

        if len(prepost_data) > 0:
            for symbol in symbols:
                try:
                    for stock in prepost_data:
                        if stock['symbol'] == symbol:
                            stock_info[stock['symbol']] = {"time_now": time_now}
                            try:
                                stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['Pre-market'] = {'preprice': '%.2f' % stock['preMarketPrice'],
                                                                                                                                'prechange': '%.2f' % stock['preMarketChange'],
                                                                                                                                'prepercent': '%.2f' % stock['preMarketChangePercent']}
                            except:
                                try:
                                    # No pre-market data: fall back to the post-market price with zero change.
                                    stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['Pre-market'] = {'preprice': '%.2f' % stock['postMarketPrice'],
                                                                                                                                    'prechange': '%.2f' % 0,
                                                                                                                                    'prepercent': '%.2f' % 0}
                                except:
                                    stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['Pre-market'] = {'preprice': '%.2f' % stock['regularMarketPrice'],
                                                                                                                                    'prechange': '%.2f' % 0,
                                                                                                                                    'prepercent': '%.2f' % 0}
                            try:
                                stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['Post-market'] = {'postprice': '%.2f' % stock['postMarketPrice'],
                                                                                                                                 'postchange': '%.2f' % stock['postMarketChange'],
                                                                                                                                 'postpercent': '%.2f' % stock['postMarketChangePercent']}
                            except:
                                stock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['Post-market'] = {'postprice': '%.2f' % stock['regularMarketPrice'],
                                                                                                                                 'postchange': '%.2f' % 0,
                                                                                                                                 'postpercent': '%.2f' % 0}
                except:
                    pass
            all_stocks_settings['symbols'] = stock_info

            with open('csv/prepost_settings.json', 'w+') as f:
                json.dump(all_stocks_settings['symbols'], f)
    except:
        pass
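
# prepost_settings.json maps each symbol to the snapshot written above, e.g.
# (values illustrative):
#   "AAPL": {"time_now": "07:30 EST",
#            "Pre-market": {"preprice": "163.50", "prechange": "-0.52", "prepercent": "-0.32"},
#            "Post-market": {"postprice": "164.10", "postchange": "0.08", "postpercent": "0.05"}}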


def updateGlobalStocks(api_key, logf):

    try:
        try:
            f = open('csv/globalstocks_settings.json', 'r')
            all_globalstocks_settings = json.load(f)
            f.close()
        except:
            all_globalstocks_settings = {"feature": "Global Stocks", "speed": "medium", "speed2": "medium", "animation": "continuous", "percent": True, "point": True, "logos": True, "chart": False, "title": True, "lohivol": True, "display_name": False, "symbols": {}}

        globalstock_info = all_globalstocks_settings['symbols']
        symbols = list(globalstock_info.keys())

        headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}

        # url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/stocks?symbols='
        url = 'https://query1.finance.yahoo.com/v7/finance/quote?fields=longName,regularMarketPrice,regularMarketChangePercent,regularMarketChange,regularMarketDayHigh,regularMarketDayLow,regularMarketVolume&region=US&lang=en-US&symbols='

        for symbol in symbols:
            url += symbol + ','
        # url += '&apiKey=' + api_key

        url = url.replace('BRK.A', 'BRK-A').replace('BRK.B', 'BRK-B')
        # response = requests.get(url, headers=headers)
        # data = response.json()
        # if "'error': {'code'" in str(data):
        # Unlike updateStocks, this always goes through the cached session/crumb.
        while True:
            try:
                with open('session.txt', 'rb') as f:
                    session = pickle.load(f)
                with open('crumb.txt', 'r') as f:
                    crumb = f.read()
            except:
                getCookiesnCrumb()
                with open('session.txt', 'rb') as f:
                    session = pickle.load(f)
                with open('crumb.txt', 'r') as f:
                    crumb = f.read()
            params = {'crumb': crumb}
            data = session.get(url, headers=headers, params=params).json()
            if "'error': {'code'" not in str(data):
                break
            else:
                getCookiesnCrumb()
                time.sleep(5)

        # globalstock_info = {}
        if len(data) > 0:
            for symbol in symbols:
                try:
                    for stock in data['quoteResponse']['result']:
                        if stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B') == symbol and 'name' in globalstock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')] and globalstock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['name'] != '':
                            globalstock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')] = {'current': str(stock['regularMarketPrice']), 'change': str(stock['regularMarketChange']), 'percent_change': str(stock['regularMarketChangePercent']), 'day_low': str(stock['regularMarketDayLow']), 'day_high': str(stock['regularMarketDayHigh']), 'volume': str(human_format(stock['regularMarketVolume'])), 'name': globalstock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]['name']}
                        elif stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B') == symbol and 'name' not in globalstock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')]:
                            globalstock_info[stock['symbol'].replace('BRK-A', 'BRK.A').replace('BRK-B', 'BRK.B')] = {'current': str(stock['regularMarketPrice']), 'change': str(stock['regularMarketChange']), 'percent_change': str(stock['regularMarketChangePercent']), 'day_low': str(stock['regularMarketDayLow']), 'day_high': str(stock['regularMarketDayHigh']), 'volume': str(human_format(stock['regularMarketVolume'])), 'name': stock['longName'].replace(',', '').replace('Inc.', 'Inc').replace('Corporation', 'Corp').replace('Ltd.', 'Ltd').replace('Limited', 'Ltd')}
                except:
                    pass
        all_globalstocks_settings['symbols'] = globalstock_info
        with open('csv/globalstocks_settings.json', 'w+') as f:
            json.dump(all_globalstocks_settings, f)
    except:
        pass


def updateCommodities(api_key, logf):

    try:
        try:
            f = open('csv/commodities_settings.json', 'r')
            all_commodities_settings = json.load(f)
            f.close()
        except:
all_commodities_settings = {"feature": "Stocks", "speed": "fast", "speed2": "fast", "animation": "down", "percent": True, "point": True, "logos": True, "chart": False, "title": True, "symbols": {"BRENTOIL": {"current": "123.053", "unit": "bbl", "24hr_change": "1.0150", "percent_change": "0.83"}, "WTIOIL": {"current": "121.588", "unit": "bbl", "24hr_change": "0.8902", "percent_change": "0.74"}, "XAU": {"current": "1821.205", "unit": "oz", "24hr_change": "4.0045", "percent_change": "0.22"}, "XAG": {"current": "21.1034", "unit": "oz", "24hr_change": "-0.0550", "percent_change": "-0.26"}, "XCU": {"current": "0.2633", "unit": "oz", "24hr_change": "-0.0006", "percent_change": "-0.22"}, "NG": {"current": "8.6595", "unit": "mmbtu", "24hr_change": "-0.0236", "percent_change": "-0.27"}, "WHEAT": {"current": "393.123", "unit": "ton", "24hr_change": "-1.2642", "percent_change": "-0.32"}, "COTTON": {"current": "1.4494", "unit": "lb", "24hr_change": "0.0004", "percent_change": "0.03"}, "RICE": {"current": "16.3849", "unit": "cwt", "24hr_change": "0.0093", "percent_change": "0.06"}, "SUGAR": {"current": "0.1866", "unit": "lb", "24hr_change": "-0.0007", "percent_change": "-0.40"}, "COCOA": {"current": "2374.074", "unit": "ton", "24hr_change": "2.5206", "percent_change": "0.11"}, "LUMBER": {"current": "527.842", "unit": "oz", "24hr_change": "0.2641", "percent_change": "0.05"}, "SOYBEAN": {"current": "17.1621", "unit": "bu", "24hr_change": "0.0270", "percent_change": "0.16"}}}

        commodity_info = all_commodities_settings['symbols']
        symbols = list(commodity_info.keys())

        url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/commodities?symbols='

        for symbol in symbols:
            url += symbol + ','

        url += '&apiKey=' + api_key
        response = requests.get(url)
        data = response.json()

        commodity_info = {}
        if len(data) > 0:
            for symbol in symbols:
                for commodity in data:
                    if commodity['symbol'] == symbol:
                        commodity_info[commodity['symbol']] = {'current': commodity['price'], 'unit': commodity['unit'], '24hr_change': commodity['price_over_24hr'], 'percent_change': commodity['percent_over_24hr']}

        all_commodities_settings['symbols'] = commodity_info

        with open('csv/commodities_settings.json', 'w+') as f:
            json.dump(all_commodities_settings, f)

    except:
        pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()


def updateMovies(api_key, logf):

    try:
        f = open('csv/movie_settings.json', 'r')
        all_settings = json.load(f)
        f.close()
    except:
all_settings = {"feature": "Movies", "speed": "fast", "speed2": "fast", "animation": "continuous", "category": "Popular All", "title": True, "movies": [{"title": "Avatar: The Way of Water", "language": "EN", "votes": "8.1", "date": "2022-12-14", "media_type": "Movie", "genre": ["Sci-Fi", "Action", "Adventure"], "backdrop": "198vrF8k7mfQ4FjDJsBmdQcaiyq.jpg", "logo": "https://image.tmdb.org/t/p/w500/198vrF8k7mfQ4FjDJsBmdQcaiyq.jpg"}, {"title": "Violent Night", "language": "EN", "votes": "7.3", "date": "2022-11-30", "media_type": "Movie", "genre": ["Action", "Comedy", "Crime", "Thriller"], "backdrop": "g9Kb3RaLjsybI1jpqHQ3QZTCYpB.jpg", "logo": "https://image.tmdb.org/t/p/w500/g9Kb3RaLjsybI1jpqHQ3QZTCYpB.jpg"}, {"title": "Avatar", "language": "EN", "votes": "7.5", "date": "2009-12-15", "media_type": "Movie", "genre": ["Action", "Adventure", "Fantasy", "Sci-Fi"], "backdrop": "Yc9q6QuWrMp9nuDm5R8ExNqbEq.jpg", "logo": "https://image.tmdb.org/t/p/w500/Yc9q6QuWrMp9nuDm5R8ExNqbEq.jpg"}, {"title": "The Banshees of Inisherin", "language": "EN", "votes": "7.7", "date": "2022-10-21", "media_type": "Movie", "genre": ["Drama", "Comedy"], "backdrop": "9Md4CqzUGDtK5oEkRRvozLkGc9d.jpg", "logo": "https://image.tmdb.org/t/p/w500/9Md4CqzUGDtK5oEkRRvozLkGc9d.jpg"}, {"title": "Wednesday", "language": "EN", "votes": "8.8", "date": "2022-11-23", "media_type": "Tv", "genre": ["Sci-Fi & Fantasy", "Mystery", "Comedy"], "backdrop": "iHSwvRVsRyxpX7FE7GbviaDvgGZ.jpg", "logo": "https://image.tmdb.org/t/p/w500/iHSwvRVsRyxpX7FE7GbviaDvgGZ.jpg"}, {"title": "1923", "language": "EN", "votes": "8.8", "date": "2022-12-18", "media_type": "Tv", "genre": ["Drama", "Western"], "backdrop": "9I6LgZ5110ycg4pyobJxGTFWFCF.jpg", "logo": "https://image.tmdb.org/t/p/w500/9I6LgZ5110ycg4pyobJxGTFWFCF.jpg"}, {"title": "The Recruit", "language": "EN", "votes": "7.2", "date": "2022-12-16", "media_type": "Tv", "genre": ["Drama", "Crime"], "backdrop": "rey2eh6752C2UbGYRileKk1PVTo.jpg", "logo": "https://image.tmdb.org/t/p/w500/rey2eh6752C2UbGYRileKk1PVTo.jpg"}, {"title": "Black Adam", "language": "EN", "votes": "7.2", "date": "2022-10-19", "media_type": "Movie", "genre": ["Action", "Fantasy", "Sci-Fi"], "backdrop": "bQXAqRx2Fgc46uCVWgoPz5L5Dtr.jpg", "logo": "https://image.tmdb.org/t/p/w500/bQXAqRx2Fgc46uCVWgoPz5L5Dtr.jpg"}, {"title": "Nanny", "language": "EN", "votes": "5.4", "date": "2022-11-23", "media_type": "Movie", "genre": ["Horror", "Drama"], "backdrop": "nfuPlOK6ywGzKGb0yf7VJKyTFWb.jpg", "logo": "https://image.tmdb.org/t/p/w500/nfuPlOK6ywGzKGb0yf7VJKyTFWb.jpg"}, {"title": "Tom Clancys Jack Ryan", "language": "EN", "votes": "7.7", "date": "2018-08-30", "media_type": "Tv", "genre": ["Action & Adventure", "Drama", "War & Politics"], "backdrop": "6ovk8nrrSmN1ieT14zBAxcHbMU7.jpg", "logo": "https://image.tmdb.org/t/p/w500/6ovk8nrrSmN1ieT14zBAxcHbMU7.jpg"}, {"title": "High Heat", "language": "EN", "votes": "6.5", "date": "2022-12-16", "media_type": "Movie", "genre": ["Action", "Comedy", "Crime"], "backdrop": "gjNM0odqkq5F7V58OjfTxPJ9p9Z.jpg", "logo": "https://image.tmdb.org/t/p/w500/gjNM0odqkq5F7V58OjfTxPJ9p9Z.jpg"}, {"title": "A Not So Merry Christmas", "language": "ES", "votes": "4.8", "date": "2022-12-20", "media_type": "Movie", "genre": ["Comedy"], "backdrop": "8uyJzaiGbiezZ9K48Cy5wXeqnYw.jpg", "logo": "https://image.tmdb.org/t/p/w500/8uyJzaiGbiezZ9K48Cy5wXeqnYw.jpg"}, {"title": "Guillermo del Toros Pinocchio", "language": "EN", "votes": "8.5", "date": "2022-11-09", "media_type": "Movie", "genre": ["Animation", "Fantasy", "Drama"], "backdrop": 
"e782pDRAlu4BG0ahd777n8zfPzZ.jpg", "logo": "https://image.tmdb.org/t/p/w500/e782pDRAlu4BG0ahd777n8zfPzZ.jpg"}, {"title": "His Dark Materials", "language": "EN", "votes": "8.0", "date": "2019-11-03", "media_type": "Tv", "genre": ["Sci-Fi & Fantasy", "Drama"], "backdrop": "dGOhplPZTL0SKyb0ocTFBHIuKUC.jpg", "logo": "https://image.tmdb.org/t/p/w500/dGOhplPZTL0SKyb0ocTFBHIuKUC.jpg"}, {"title": "The Fabelmans", "language": "EN", "votes": "7.8", "date": "2022-11-11", "media_type": "Movie", "genre": ["Drama", "Comedy"], "backdrop": "6RCf9jzKxyjblYV4CseayK6bcJo.jpg", "logo": "https://image.tmdb.org/t/p/w500/6RCf9jzKxyjblYV4CseayK6bcJo.jpg"}, {"title": "The Seven Deadly Sins: Grudge of Edinburgh Part 1", "language": "JA", "votes": "7.8", "date": "2022-12-20", "media_type": "Movie", "genre": ["Animation", "Fantasy", "Adventure", "Action"], "backdrop": "24fe6ou97ammOg3O6ShCgaiolp4.jpg", "logo": "https://image.tmdb.org/t/p/w500/24fe6ou97ammOg3O6ShCgaiolp4.jpg"}, {"title": "Mindcage", "language": "EN", "votes": "7.6", "date": "2022-12-16", "media_type": "Movie", "genre": ["Mystery", "Thriller", "Crime", "Drama"], "backdrop": "An2M2gm0p8POaiGTcZvP1JnUItH.jpg", "logo": "https://image.tmdb.org/t/p/w500/An2M2gm0p8POaiGTcZvP1JnUItH.jpg"}, {"title": "Private Lesson", "language": "TR", "votes": "7.3", "date": "2022-12-16", "media_type": "Movie", "genre": ["Comedy", "Romance"], "backdrop": "uZtYhcnk3WWvUzQkJLqnNywMQpb.jpg", "logo": "https://image.tmdb.org/t/p/w500/uZtYhcnk3WWvUzQkJLqnNywMQpb.jpg"}, {"title": "Sonic Prime", "language": "EN", "votes": "8.7", "date": "2022-12-15", "media_type": "Tv", "genre": ["Animation", "Family"], "backdrop": "1Iiz2uLcZuLn4Khog2yiKpbl11.jpg", "logo": "https://image.tmdb.org/t/p/w500/1Iiz2uLcZuLn4Khog2yiKpbl11.jpg"}, {"title": "The Big 4", "language": "ID", "votes": "7.0", "date": "2022-12-19", "media_type": "Movie", "genre": ["Action", "Comedy", "Crime"], "backdrop": "clO1mWRYT24ogzN3o6LsqHjqrQu.jpg", "logo": "https://image.tmdb.org/t/p/w500/clO1mWRYT24ogzN3o6LsqHjqrQu.jpg"}]}

    if all_settings['category'] == 'Popular Movies':
        url = 'https://api.themoviedb.org/3/trending/movie/day?'
        movieGenre_url = 'https://api.themoviedb.org/3/genre/movie/list?api_key=' + api_key + '&language=en-US'
        movieGenre_response = requests.get(movieGenre_url)
        movie_genres = movieGenre_response.json()

    elif all_settings['category'] == 'Popular TV':
        url = 'https://api.themoviedb.org/3/trending/tv/day?'
        tvGenre_url = 'https://api.themoviedb.org/3/genre/tv/list?api_key=' + api_key + '&language=en-US'
        tvGenre_response = requests.get(tvGenre_url)
        tv_genres = tvGenre_response.json()

    elif all_settings['category'] == 'Popular All':
        url = 'https://api.themoviedb.org/3/trending/all/day?'
        movieGenre_url = 'https://api.themoviedb.org/3/genre/movie/list?api_key=' + api_key + '&language=en-US'
        movieGenre_response = requests.get(movieGenre_url)
        movie_genres = movieGenre_response.json()
        tvGenre_url = 'https://api.themoviedb.org/3/genre/tv/list?api_key=' + api_key + '&language=en-US'
        tvGenre_response = requests.get(tvGenre_url)
        tv_genres = tvGenre_response.json()

    url += 'api_key=' + api_key
    response = requests.get(url)
    data = response.json()

    this_out = []
    logo_files = []

    if len(data) > 0:
        movies = data['results']

        for movie in movies:

            if movie['media_type'] == 'movie':
                movie_id = movie['id']
                box_office_url = 'https://api.themoviedb.org/3/movie/' + str(movie_id) + '?api_key=' + api_key
                box_office_response = requests.get(box_office_url)
                box_office_data = box_office_response.json()
                budget = human_format(box_office_data['budget'])
                revenue = human_format(box_office_data['revenue'])
            else:
                budget = '0'
                revenue = '0'

            movie_language = movie['original_language']
            movie_votes = movie['vote_average']
            movie_votes = "{:.1f}".format(movie_votes)
            try:
                movie_titles = movie['title']
                movie_date = movie['release_date']
            except KeyError:
                # TV entries use 'name' and 'first_air_date' instead.
                movie_titles = movie['name']
                movie_date = movie['first_air_date']
            movie_type = movie['media_type']
            movie_genre = movie['genre_ids']
            movie_logo = 'https://image.tmdb.org/t/p/w500' + movie['backdrop_path']
            genrefinal = []

            if all_settings['category'] == 'Popular Movies':
                for i in movie_genre:
                    for genre in movie_genres['genres']:
                        if genre['name'] == 'Science Fiction':
                            genre['name'] = 'Sci-Fi'
                        if i == genre['id']:
                            i = genre['name']
                            genrefinal.append(i)
            elif all_settings['category'] == 'Popular TV':
                for i in movie_genre:
                    for genre in tv_genres['genres']:
                        if i == genre['id']:
                            i = genre['name']
                            genrefinal.append(i)
            elif all_settings['category'] == 'Popular All':
                if movie['media_type'] == 'movie':
                    for i in movie_genre:
                        for genre in movie_genres['genres']:
                            if genre['name'] == 'Science Fiction':
                                genre['name'] = 'Sci-Fi'
                            if i == genre['id']:
                                i = genre['name']
                                genrefinal.append(i)
                elif movie['media_type'] == 'tv':
                    for i in movie_genre:
                        for genre in tv_genres['genres']:
                            if i == genre['id']:
                                i = genre['name']
                                genrefinal.append(i)

            this_out.append({'title': movie_titles,
                             'language': movie_language.upper(),
                             'votes': str(movie_votes),
                             'date': movie_date,
                             'media_type': movie_type.capitalize(),
                             'genre': genrefinal,
                             'budget': budget,
                             'revenue': revenue,
                             'backdrop': movie['backdrop_path'][1:],
                             'logo': movie_logo
                             })

            logo_files.append(movie['backdrop_path'][1:])

            # Download any backdrop not already cached locally.
            if movie['backdrop_path'][1:] not in os.listdir('logos/movies/'):
                urllib.request.urlretrieve(movie_logo, 'logos/movies/' + movie['backdrop_path'])
                time.sleep(0.5)

        # Prune cached backdrops that are no longer in the trending list.
        for file in os.listdir('logos/movies/'):
            if file not in logo_files:
                os.remove('logos/movies/' + file)

    all_settings['movies'] = this_out

    with open('csv/movie_settings.json', 'w+') as f:
        json.dump(all_settings, f)


def updateIpo(api_key, logf):

    day = datetime.now(pytz.utc).strftime("%Y-%m-%d")
    today = datetime.strptime(day, "%Y-%m-%d")  # renamed from 'dt' to avoid shadowing the module alias

    start = (today - timedelta(days=today.weekday()))
    start_date = start.strftime("%Y-%m-%d")

    end = start + timedelta(days=21)
    end_date = end.strftime("%Y-%m-%d")

    ipo_url = 'https://finnhub.io/api/v1/calendar/ipo?from=' + start_date + '&to=' + end_date + '&token=' + api_key

    try:
        f = open('csv/ipo_settings.json', 'r')
        ipo_settings = json.load(f)
        f.close()
    except:
        ipo_settings = {"feature": "IPO", "speed": "medium", "speed2": "medium", "animation": "down", "title": True, "symbols": ["No Data"]}

    data = requests.get(ipo_url)
    all_ipo = data.json()

    ipo_list = []

    try:
        if len(all_ipo['ipoCalendar']) > 0:
            for ipo in all_ipo['ipoCalendar']:
                try:
                    shares = human_format(ipo['numberOfShares'])
                except:
                    shares = 'N/A'
                try:
                    sharesvalue = human_format(ipo['totalSharesValue'])
                except:
                    sharesvalue = 'N/A'

                ipo_list.append({
                    'date': ipo['date'],
                    'name': ipo['name'],
                    'shares': shares,
                    'price': ipo['price'],
                    'status': ipo['status'],
                    'symbol': ipo['symbol'],
                    'sharesvalue': sharesvalue
                })
        else:
            ipo_list = ['No Data']
    except:
        ipo_list = ['No Data']

    ipo_settings['symbols'] = ipo_list
    with open('csv/ipo_settings.json', 'w+') as f:
        json.dump(ipo_settings, f)
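
# The IPO window above spans from the Monday of the current week to 21 days
# later. For example (dates illustrative): if today (UTC) is Wednesday
# 2023-06-14, start_date is '2023-06-12' and end_date is '2023-07-03'.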


def updateIndices(api_key, logf):

    try:
        try:
            f = open('csv/indices_settings.json', 'r')
            all_indices_settings = json.load(f)
            f.close()
        except:
all_indices_settings = {"feature": "Stocks", "speed": "fast", "speed2": "slow", "animation": "up", "percent": True, "point": True, "logos": True, "chart": False, "title": True, "symbols": {"^HSI": {"name": "HSI", "current": "18083.06", "point_change": "1003.55", "percent_change": "5.88"}, "^GSPC": {"name": "S&P 500", "current": "3790.93", "point_change": "112.50", "percent_change": "3.06"}, "^RUT": {"name": "RUSSELL 2000", "current": "1775.77", "point_change": "66.90", "percent_change": "3.91"}, "^GDAXI": {"name": "DAX", "current": "12648.95", "point_change": "-21.53", "percent_change": "-0.17"}, "^FTSE": {"name": "FTSE 100", "current": "7058.68", "point_change": "-27.82", "percent_change": "-0.39"}, "^FCHI": {"name": "CAC 40", "current": "6031.45", "point_change": "-8.24", "percent_change": "-0.14"}, "399001.SZ": {"name": "SZSE", "current": "10778.61", "point_change": "-140.83", "percent_change": "-1.29"}, "^STOXX50E": {"name": "STOXX 50", "current": "3476.55", "point_change": "-7.93", "percent_change": "-0.23"}, "^AXJO": {"name": "ASX 200", "current": "6815.70", "point_change": "116.40", "percent_change": "1.74"}, "^DJI": {"name": "DOW JONES", "current": "30316.32", "point_change": "825.43", "percent_change": "2.80"}, "^STOXX": {"name": "STOXX 600", "current": "402.33", "point_change": "-0.70", "percent_change": "-0.17"}}}

        index_info = all_indices_settings['symbols']
        symbols = list(index_info.keys())

        url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/indices?symbols='

        for symbol in symbols:
            url += symbol + ','

        url += '&apiKey=' + api_key
        response = requests.get(url)
        data = response.json()

        index_info = {}
        if len(data) > 0:
            for symbol in symbols:
                for index in data:
                    if index['symbol'] == symbol:
                        index_info[index['symbol']] = {'name': index['name'], 'current': index['price'], 'point_change': index['change'], 'percent_change': index['percent_change']}

        all_indices_settings['symbols'] = index_info
        with open('csv/indices_settings.json', 'w+') as f:
            json.dump(all_indices_settings, f)

    except:
        pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()


def updateCrypto(api_key, logf):

    try:
        try:
            f = open('csv/crypto_settings.json', 'r')
            all_crypto_settings = json.load(f)
            f.close()
        except:
all_crypto_settings = {"feature": "Stocks", "speed": "medium","speed2": "medium", "animation": "down", "percent": False, "point": True, "logos": True, "chart": False, "title": True, "lohivol": False, "display_name": False, "symbols": {"ETH,USD": {"current": "2629.32", "24hr_change": "-27.6432", "percent_change": "-1.04"}, "BTC,USD": {"current": "38161.00", "24hr_change": "-50.8386", "percent_change": "-0.13"}, "BNB,USD": {"current": "372.57", "24hr_change": "0.4140", "percent_change": "0.11"}, "ADA,BTC": {"current": "0.0000", "24hr_change": "-0.0000", "percent_change": "-3.74"}}}

        coin_info = all_crypto_settings['symbols']
        symbol_base = list(coin_info.keys())

        symbols = [sb.split(',')[0] for sb in symbol_base]
        bases = [sb.split(',')[1] for sb in symbol_base]
        unique_bases = list(set(bases))

        url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/crypto?symbols='

        for i, s in enumerate(symbols):
            url += bases[i] + '-' + s + ','
        url = url[:-1]  # remove last comma
        url += '&apiKey=' + api_key

        response = requests.get(url)
        data = response.json()

        coin_info = {}
        if len(data) > 0:
            for sb in symbol_base:
                for i, d in enumerate(data):

                    symbol = d['symbol']
                    base = d['currency']

                    if symbol.upper() + ',' + base.upper() == sb:
                        coin_info[symbol.upper() + ',' + base.upper()] = {'current': d['price'], '24hr_change': d['price_over_24hr'], 'percent_change': d['percent_over_24hr'], 'volume': human_format(d['volume']), 'day_low': d['day_low'], 'day_high': d['day_high'], 'name': d['name']}

        all_crypto_settings['symbols'] = coin_info
        with open('csv/crypto_settings.json', 'w+') as f:
            json.dump(all_crypto_settings, f)

    except:
        pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()
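
# Crypto symbols are keyed 'COIN,BASE'. For example (illustrative), 'BTC,USD'
# is sent to the API as 'USD-BTC' (base-coin) and matched back to its settings
# key via d['symbol'].upper() + ',' + d['currency'].upper().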


def updateForex(api_key, logf):

    try:
        try:
            f = open('csv/forex_settings.json', 'r')
            all_forex_settings = json.load(f)
            f.close()
        except:
all_forex_settings = {"feature": "Stocks", "speed": "medium", "speed2": "medium", "animation": "down", "percent": False, "point": True, "logos": True, "chart": False, "title": True, "symbols": {"EUR,USD": {"current": "1.1334", "24hr_change": "-0.0003", "percent_change": "0.00"}, "USD,JPY": {"current": "114.960", "24hr_change": "0.1600", "percent_change": "0.14"}, "GBP,USD": {"current": "1.3577", "24hr_change": "-0.0031", "percent_change": "-0.23"}, "USD,CHF": {"current": "0.9198", "24hr_change": "0.0029", "percent_change": "0.32"}}}

        forex_info = all_forex_settings['symbols']
        symbol_base = list(forex_info.keys())

        symbols = [sb.split(',')[0] for sb in symbol_base]
        bases = [sb.split(',')[1] for sb in symbol_base]
        unique_bases = list(set(bases))

        targets = ','.join(symbols)
        url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/forex?symbols='
        for i, s in enumerate(symbols):
            url += s + '-' + bases[i] + ','
        url = url[:-1]  # remove last comma
        url += '&apiKey=' + api_key

        response = requests.get(url)
        data = response.json()

        if len(data) > 0:
            c_dict = {}
            for sb in symbol_base:
                for d in data:
                    if d['uid'].replace('/', ',') == sb:
                        c_dict[d['uid'].replace('/', ',')] = {'current': d['rate'], '24hr_change': d['rate_over_24hr'], 'percent_change': d['percent_over_24hr']}

            all_forex_settings['symbols'] = c_dict

            with open('csv/forex_settings.json', 'w+') as f:
                json.dump(all_forex_settings, f)
    except:
        pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()
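
# Forex pairs are keyed 'TARGET,BASE' (e.g. 'EUR,USD'); the API returns a uid
# with a slash (e.g. 'EUR/USD'), so d['uid'].replace('/', ',') recovers the key.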


def updateEconomic(api_key, logf):

    try:
        with open('csv/economic_settings.json', 'r') as f:
            all_economic_settings = json.load(f)
    except:
        all_economic_settings = {"feature": "Economic Calendar", "speed": "medium", "speed2": "medium", "animation": "up", "importance": "Med - High", "title": True, "timezone": "US/Eastern", "countries": ["United States"], "events": []}

    try:
        url = 'https://economic-calendar.tradingview.com/events'
        headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
        country_codes = {
            'United States': 'US', 'Canada': 'CA', 'Hong Kong': 'HK', 'China': 'CN', 'Germany': 'DE', 'France': 'FR', 'Italy': 'IT', 'Singapore': 'SG',
            'South Korea': 'KR', 'Japan': 'JP', 'Australia': 'AU', 'New Zealand': 'NZ', 'India': 'IN', 'Switzerland': 'CH', 'United Kingdom': 'GB',
            'Spain': 'ES', 'Portugal': 'PT', 'Netherlands': 'NL', 'Belgium': 'BE', 'Austria': 'AT', 'Denmark': 'DK', 'Turkey': 'TR', 'Brazil': 'BR',
            'Mexico': 'MX', 'Sweden': 'SE', 'Finland': 'FI', 'Norway': 'NO', 'Taiwan': 'TW', 'Indonesia': 'ID', 'Philippines': 'PH', 'Thailand': 'TH',
            'South Africa': 'ZA'}

        country = all_economic_settings['countries']
        if all_economic_settings['importance'] == 'Low - High':
            importance = -1
        elif all_economic_settings['importance'] == 'Med - High':
            importance = 0
        elif all_economic_settings['importance'] == 'High':
            importance = 1
        country_codes_list = [country_codes[c] for c in country]
        result = ",".join(country_codes_list)
        timezone = pytz.timezone(all_economic_settings['timezone'])
        date_local = datetime.now(timezone)
        date_local = (date_local - timedelta(hours=2)).strftime('%Y-%m-%dT%H:%M:00')
        #date_local = datetime.now(timezone).strftime('%Y-%m-%dT00:00:00')
        date_local2 = datetime.now(timezone).strftime('%Y-%m-%dT23:59:00')
        date_from = timezone.localize(datetime.strptime(date_local, '%Y-%m-%dT%H:%M:%S')).astimezone(pytz.timezone('GMT')).strftime('%Y-%m-%dT%H:%M:%S.000Z')
        date_to = timezone.localize(datetime.strptime(date_local2, '%Y-%m-%dT%H:%M:%S')).astimezone(pytz.timezone('GMT')).strftime('%Y-%m-%dT%H:%M:%S.000Z')

        params = {
            'from': date_from,
            'to': date_to,
            'countries': result,
            'minImportance': importance
        }

        data = requests.get(url, params=params, headers=headers).json()
        events = []

        try:
            for each in data['result']:
                try:
                    time = pytz.timezone('GMT').localize(datetime.strptime(each['date'], '%Y-%m-%dT%H:%M:%S.%fZ')).astimezone(timezone).strftime('%a %m/%d %H:%M%p')
                    # Note: this compares two identically formatted strings, not datetime objects.
                    if datetime.now(timezone).strftime('%a %m/%d %H:%M%p') >= time:
                        happened = True
                    else:
                        happened = False
                    try:
                        unit = str(each['unit'])
                    except:
                        unit = ''
                    try:
                        scale = str(each['scale'])
                    except:
                        scale = ''
                    if scale != '':
                        if each['actual'] != '' and each['actual'] is not None:
                            each['actual'] = str(each['actual']) + scale
                        if each['previous'] != '' and each['previous'] is not None:
                            each['previous'] = str(each['previous']) + scale
                        if each['forecast'] != '' and each['forecast'] is not None:
                            each['forecast'] = str(each['forecast']) + scale
                    if unit != '' and scale != '':
                        if each['actual'] != '' and each['actual'] is not None:
                            each['actual'] = str(each['actual']) + ' ' + unit
                        if each['previous'] != '' and each['previous'] is not None:
                            each['previous'] = str(each['previous']) + ' ' + unit
                        if each['forecast'] != '' and each['forecast'] is not None:
                            each['forecast'] = str(each['forecast']) + ' ' + unit
                    elif unit != '' and scale == '':
                        if each['actual'] != '' and each['actual'] is not None:
                            each['actual'] = str(each['actual']) + unit
                        if each['previous'] != '' and each['previous'] is not None:
                            each['previous'] = str(each['previous']) + unit
                        if each['forecast'] != '' and each['forecast'] is not None:
                            each['forecast'] = str(each['forecast']) + unit

                    events.append({
                        'title': each['title'],
                        'indicator': each['indicator'],
                        'period': each['period'],
                        'country': each['country'],
                        'importance': each['importance'],
                        'time': time,
                        'actual': each['actual'],
                        'previous': each['previous'],
                        'forecast': each['forecast'],
                        'happened': happened
                    })
                except:
                    pass
        except:
            pass
        all_economic_settings['events'] = events
        with open('csv/economic_settings.json', 'w+') as f:
            json.dump(all_economic_settings, f)
    except:
        pass
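
# The query window above starts two hours before "now" in the configured
# timezone and ends at 23:59 local time, both converted to GMT. For example
# (illustrative): a 10:34 US/Eastern summer timestamp becomes '...T14:34:00.000Z'.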


def updateJokes(api_key, logf):
    try:
        with open('csv/jokes_settings.json', 'r') as f:
            jokes_settings = json.load(f)
    except:
        jokes_settings = {"feature": "Jokes", "speed": "medium", "speed2": "medium", "animation": "up", "title": True, "safe": True, "categories": ["Any"], "blacklist": [], "amount": "5", "jokes": []}
    try:
        if 'Any' in jokes_settings['categories']:
            categories = 'Any'
        else:
            categories = ','.join(jokes_settings['categories'])
        blacklist = ','.join(jokes_settings['blacklist'])
        amount = jokes_settings['amount']
        jokes_settings['jokes'] = []

        joke_url = 'https://v2.jokeapi.dev/joke/' + categories + '?amount=' + amount
        if blacklist != '':
            joke_url = 'https://v2.jokeapi.dev/joke/' + categories + '?blacklistFlags=' + blacklist + '&amount=' + amount
        if jokes_settings['safe']:
            joke_url += '&safe-mode'

        get_jokes = requests.get(joke_url)
        all_jokes = get_jokes.json()

        for each_joke in all_jokes['jokes']:
            jokes_settings['jokes'].append(each_joke)

        with open('csv/jokes_settings.json', 'w') as f:
            json.dump(jokes_settings, f)
    except:
        pass
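
# Example of the composed request (illustrative values): categories
# 'Programming,Pun', blacklist 'nsfw,racist', amount '5' and safe mode yield
# https://v2.jokeapi.dev/joke/Programming,Pun?blacklistFlags=nsfw,racist&amount=5&safe-mode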


def updateQuotes(api_key, logf):
    try:
        with open('csv/quotes_settings.json', 'r') as f:
            quotes_settings = json.load(f)
    except:
        quotes_settings = {"feature": "Inspirational Quotes", "speed": "medium", "speed2": "medium", "animation": "up", "title": True, "amount": "3", "quotes": []}
    try:
        number = int(quotes_settings['amount'])
        url = 'https://zenquotes.io/api/random'
        quotes_settings['quotes'] = []
        for _ in range(number):
            try:
                data = requests.get(url).json()[0]
                quote = data['q']
                author = data['a']
                quotes_settings['quotes'].append({
                    'quote': quote,
                    'author': author
                })
            except:
                pass
        with open('csv/quotes_settings.json', 'w') as f:
            json.dump(quotes_settings, f)
    except:
        pass


def updateMarket(api_key, logf):
    try:
        try:
            f = open('csv/market_settings.json', 'r')
            all_market_settings = json.load(f)
            f.close()
        except:
            all_market_settings = {"feature": "Gainers, Losers, Active", "speed": "medium", "speed2": "medium", "animation": "up", "percent": True, "point": True, "logos": True, "chart": False, "title": True, "lohivol": False, "display_name": False, "categories": ["Top Gainers", "Top Losers", "Most Active"], "gainers": {}, "losers": {}, "mostactive": {}}

        try:
            f = open('csv/sector_settings.json', 'r')
            all_sector_settings = json.load(f)
            f.close()
        except:
            all_sector_settings = {"feature": "Sector Performance", "speed": "medium", "speed2": "medium", "animation": "up", "logos": True, "title": True, "sectors": ["Energy", "Financials", "Real Estate", "Technology"], "data": {}}

        headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
        url = 'https://cloud.iexapis.com/stable/stock/market/overview?token=pk_aff870df1a984daa9dd43c71801c1936&'
        data = requests.get(url, headers=headers).json()

        mostactive = data['mostactive']
        gainers = data['gainers']
        losers = data['losers']
        sectors = data['sectorPerformance']
        all_market_settings['losers'] = {}
        all_market_settings['gainers'] = {}
        all_market_settings['mostactive'] = {}
        all_sector_settings['data'] = {}

        try:
            for item in losers:
                all_market_settings['losers'][item['symbol']] = {
                    "current": str(item['latestPrice']),
                    "change": str(item['change']),
                    "percent_change": str(item['changePercent'] * 100),
                    "day_low": str(item['low']),
                    "day_high": str(item['high']),
                    "volume": human_format(item['latestVolume']),
                    "name": item['companyName'].split(' - ')[0].replace('Corp.', 'Corp').replace('Corporation', 'Corp')
                }
        except:
            pass
        try:
            for item in gainers:
                all_market_settings['gainers'][item['symbol']] = {
                    "current": str(item['latestPrice']),
                    "change": str(item['change']),
                    "percent_change": str(item['changePercent'] * 100),
                    "day_low": str(item['low']),
                    "day_high": str(item['high']),
                    "volume": human_format(item['latestVolume']),
                    "name": item['companyName'].split(' - ')[0].replace('Corp.', 'Corp').replace('Corporation', 'Corp')
                }
        except:
            pass
        try:
            for item in mostactive:
                all_market_settings['mostactive'][item['symbol']] = {
                    "current": str(item['latestPrice']),
                    "change": str(item['change']),
                    "percent_change": str(item['changePercent'] * 100),
                    "day_low": str(item['low']),
                    "day_high": str(item['high']),
                    "volume": human_format(item['latestVolume']),
                    "name": item['companyName'].split(' - ')[0].replace('Corp.', 'Corp').replace('Corporation', 'Corp')
                }
        except:
            pass
        try:
            for item in sectors:
                all_sector_settings['data'][item['name']] = {
                    "current": str(item['performance'] * 100),
                    "name": str(item['name']),
                    "symbol": str(item['symbol'])
                }
        except:
            pass

        with open('csv/market_settings.json', 'w+') as f:
            json.dump(all_market_settings, f)
        with open('csv/sector_settings.json', 'w+') as f:
            json.dump(all_sector_settings, f)

    except:
        pass


def updateNews(api_key, logf):
    try:
        try:
            f = open('csv/news_settings.json', 'r')
            all_settings = json.load(f)
            f.close()
        except:
            all_settings = {"feature": "News", "speed": "medium", "speed2": "medium", "animation": "down", "country": "US", "category": "General", "title": True, "headlines": [], "use_category": True, "use_country": False, "num_headlines": "10"}

        if all_settings['use_country']:
            if all_settings['country'] == 'Worldwide':
                url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/news_worldwide?'
            else:
                c_dict = {'United States': 'US', 'Australia': 'AU', 'Canada': 'CA', 'Great Britain': 'GB', 'New Zealand': 'NZ', 'Ireland': 'IE', 'Singapore': 'SG', 'South Africa': 'ZA', 'Germany': 'DE', 'Hong Kong': 'HK', 'Japan': 'JP', 'South Korea': 'KR', 'China': 'CN', 'France': 'FR', 'India': 'IN', 'Italy': 'IT', 'Switzerland': 'CH', 'Netherlands': 'NL', 'Spain': 'ES', 'Brazil': 'BR', 'Portugal': 'PT'}
                cc = c_dict[all_settings['country']]
                url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/news?country={}'.format(cc)
        elif all_settings['use_category']:
            url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/news?category={}'.format(all_settings['category'])

        url += '&apiKey=' + api_key
        response = requests.get(url)
        data = response.json()

        if len(data) > 0:
            max_headlines = int(all_settings['num_headlines'])
            # load user settings
            headlines = data[:max_headlines]
            headline_sources = [headline['source'] for headline in headlines]
            headline_titles = [headline['title'] for headline in headlines]
            headline_times = [headline['publishedAt'] for headline in headlines]

            headlines = list(zip(headline_titles, headline_sources, headline_times))

            all_settings['headlines'] = headlines

            with open('csv/news_settings.json', 'w+') as f:
                json.dump(all_settings, f)
    except:
        pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()
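
# After JSON serialisation, news_settings.json['headlines'] is a list of
# [title, source, publishedAt] triples, e.g. (illustrative):
#   ["Markets rally on earnings", "Reuters", "2023-06-14T12:00:00Z"]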


def updateWeather(api_key, logf):

    try:
        gn = geocoders.GeoNames(username='fintic')
        # Map Open-Meteo WMO weather codes to [main_weather, description].
        weather_codes = {
            0: ['Clear', 'Clear sky'],
            1: ['Clouds', 'few clouds'], 2: ['Clouds', 'scattered clouds'], 3: ['Clouds', 'overcast clouds'],
            45: ['Fog', 'Fog'], 48: ['Fog', 'depositing rime fog'],
            51: ['Drizzle', 'Light'], 53: ['Drizzle', 'moderate'], 55: ['Drizzle', 'dense'], 56: ['Drizzle', 'light'], 57: ['Drizzle', 'dense'],
            61: ['Rain', 'light rain'], 63: ['Rain', 'moderate rain'], 65: ['Rain', 'very heavy rain'],
            66: ['Rain', 'freezing rain'], 67: ['Rain', 'freezing rain'],
            71: ['Snow', 'slight'], 73: ['Snow', 'moderate'], 75: ['Snow', 'heavy'], 77: ['Snow', 'Snow grains'], 85: ['Snow', 'slight'], 86: ['Snow', 'heavy'],
            80: ['Rain', 'light intensity shower rain'], 81: ['Rain', 'shower rain'], 82: ['Rain', 'heavy intensity shower rain'],
            95: ['Thunderstorm', 'Slight or moderate'], 96: ['Thunderstorm', 'slight hail'], 99: ['Thunderstorm', 'heavy hail']
        }
        try:
            f = open('csv/daily_weather.json', 'r')
            all_daily_settings = json.load(f)
            f.close()
        except:
all_daily_settings = {"feature": "Current Weather", "speed": "medium", "animation": "down", "temp": "celsius", "wind_speed": "miles/hour", "colour": "white", "city_colour": "yellow", "title": True, "locations": {"Hong Kong": [{"main_weather": "Thunderstorm", "description": "Slight or moderate", "min_temp": 28.2, "max_temp": 30.8, "temp": 30.4, "rain_chance": 17, "humidity": 77, "wind_speed": 11.1, "uv": 5.3, "clouds": 100, "wind_direction": 193, "visibility": 10000}, {"main_weather": "Clouds", "description": "overcast clouds", "min_temp": 27.9, "max_temp": 32.3}, {"main_weather": "Thunderstorm", "description": "Slight or moderate", "min_temp": 27.7, "max_temp": 31.2}, {"main_weather": "Thunderstorm", "description": "Slight or moderate", "min_temp": 28.1, "max_temp": 31.5}, {"main_weather": "Rain", "description": "light intensity shower rain", "min_temp": 28.5, "max_temp": 31.1}, {"main_weather": "Clouds", "description": "overcast clouds", "min_temp": 28.9, "max_temp": 31.5}, {"main_weather": "Clouds", "description": "scattered clouds", "min_temp": 29.0, "max_temp": 31.9}]}, "current_weather": False, "speed2": "medium"}
        try:
            f = open('csv/current_weather.json', 'r')
            all_current_settings = json.load(f)
            f.close()
        except:
all_current_settings = {"feature": "Current Weather", "speed": "medium", "animation": "down", "temp": "celsius", "wind_speed": "miles/hour", "colour": "white", "city_colour": "yellow", "title": True, "locations": {"Hong Kong": {"main_weather": "Clouds", "description": "overcast clouds", "temp": 30.4, "min_temp": 28.2, "max_temp": 30.8, "feels_like": 36.0, "humidity": 77, "clouds": 100, "visibility": 10000, "uv": 5.3, "rain_chance": 17, "wind_speed": 11.1, "wind_direction": 193, "is_day": 1}}, "current_weather": True, "speed2": "medium"}
|
|
|
|
current_locations = list(all_current_settings['locations'].keys())
|
|
daily_locations = list(all_daily_settings['locations'].keys())
|
|
all_locations = list(set(current_locations + daily_locations))
|
|
|
|
current_weathers = {}
|
|
daily_weathers = {}
|
|
|
|
for location in all_locations:
|
|
loc = gn.geocode(location)
|
|
current_weather = {}
|
|
|
|
lat = loc.latitude
|
|
lon = loc.longitude
|
|
url = 'https://api.open-meteo.com/v1/forecast?latitude={}&longitude={}&hourly=apparent_temperature,temperature_2m,relativehumidity_2m,precipitation_probability,weathercode,cloudcover,visibility,windspeed_10m,winddirection_10m,uv_index,is_day&daily=weathercode,temperature_2m_max,temperature_2m_min¤t_weather=true&timezone=UTC'.format(lat, lon)
|
|
r = requests.get(url).json()
|
|
|
|
times = r['hourly']['time']
|
|
hour_now = datetime.now(pytz.utc).strftime('%Y-%m-%dT%H:00')
|
|
index_pos = times.index(hour_now)
|
|
|
|
main_weather_code = r['hourly']['weathercode'][index_pos]
|
|
current_weather['main_weather'] = weather_codes[main_weather_code][0]
|
|
current_weather['description'] = weather_codes[main_weather_code][1]
|
|
current_weather['temp'] = r['hourly']['temperature_2m'][index_pos]
|
|
current_weather['min_temp'] = r['daily']['temperature_2m_min'][0]
|
|
current_weather['max_temp'] = r['daily']['temperature_2m_max'][0]
|
|
current_weather['feels_like'] = r['hourly']['apparent_temperature'][index_pos]
|
|
current_weather['humidity'] = r['hourly']['relativehumidity_2m'][index_pos]
|
|
current_weather['clouds'] = r['hourly']['cloudcover'][index_pos]
|
|
if r['hourly']['visibility'][index_pos] > 10000:
|
|
current_weather['visibility'] = 10000
|
|
else:
|
|
current_weather['visibility'] = r['hourly']['visibility'][index_pos]
|
|
current_weather['uv'] = r['hourly']['uv_index'][index_pos]
|
|
current_weather['rain_chance'] = r['hourly']['precipitation_probability'][index_pos]
|
|
current_weather['wind_speed'] = r['hourly']['windspeed_10m'][index_pos]
|
|
current_weather['wind_direction'] = r['hourly']['winddirection_10m'][index_pos]
|
|
current_weather['is_day'] = r['hourly']['is_day'][index_pos]
|
|
|
|
if location in current_locations:
|
|
current_weathers[location] = current_weather
|
|
|
|
daily_weather = []
|
|
daily = r['daily']
|
|
|
|
for i in range(0,7):
|
|
dct = {}
|
|
daily_weather_code = daily['weathercode'][i]
|
|
dct['main_weather'] = weather_codes[daily_weather_code][0]
|
|
dct['description'] = weather_codes[daily_weather_code][1]
|
|
dct['min_temp'] = daily['temperature_2m_min'][i]
|
|
dct['max_temp'] = daily['temperature_2m_max'][i]
|
|
daily_weather.append(dct)
|
|
|
|
# add relevant urrent information to first day in daily
|
|
daily_weather[0]['temp'] = current_weather['temp']
|
|
daily_weather[0]['rain_chance'] = current_weather['rain_chance']
|
|
daily_weather[0]['humidity'] = current_weather['humidity']
|
|
daily_weather[0]['wind_speed'] = current_weather['wind_speed']
|
|
daily_weather[0]['uv'] = current_weather['uv']
|
|
daily_weather[0]['clouds'] = current_weather['clouds']
|
|
daily_weather[0]['wind_speed'] = current_weather['wind_speed']
|
|
daily_weather[0]['wind_direction'] = current_weather['wind_direction']
|
|
daily_weather[0]['visibility'] = current_weather['visibility']
|
|
|
|
if location in daily_locations:
|
|
daily_weathers[location] = daily_weather
|
|
|
|
all_current_settings['locations'] = current_weathers
|
|
all_daily_settings['locations'] = daily_weathers
|
|
|
|
with open('csv/current_weather.json', 'w+') as f:
|
|
json.dump(all_current_settings, f)
|
|
|
|
with open("csv/daily_weather.json", 'w+') as f:
|
|
json.dump(all_daily_settings, f)
|
|
|
|
except:
|
|
pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()

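
# Illustrative sketch (module level, not called anywhere): Open-Meteo's hourly
# arrays are parallel to the 'time' array, so the row for "now" is found by
# formatting the current UTC hour the same way the API does and taking its
# index, exactly as the weather update above does with times.index(hour_now).
# The -1 fallback for a missing hour is an assumption; the original raises.
def hourly_index_for_now(times):
    hour_now = datetime.now(pytz.utc).strftime('%Y-%m-%dT%H:00')
    return times.index(hour_now) if hour_now in times else -1
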
def updateLeagueTables(api_key, logf):

    url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/sports?stats='

    try:
        try:
            f = open('csv/league_tables.json', 'r')
            all_settings = json.load(f)
            f.close()
        except:
            all_settings = {"feature": "Sports (Team Stats)", "speed": "medium", "speed2": "medium", "animation": "down", "title": True, "top20": 20, "leagues": {'NFL': []}}

        leagues = all_settings['leagues'].keys()
        leagues_info = {}

        for league in leagues:
            if league == 'PREMIERLEAGUE':
                url += 'PREMIERLEAGUE,'
            else:
                url += league + ','

        url = url[:-1] # remove the trailing comma
        url += '&apiKey=' + api_key
        r = requests.get(url)

        all_data = r.json()

        for i, l in enumerate(all_data):
            league = list(l.keys())[0]
            teams = []
            if league == 'pga' or league == 'lpga':
                logo_files = []
                for d in all_data[i][league]:
                    del d['_id'], d['updated']
                    teams.append(d)
                    try:
                        if d['country'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_countries/'):
                            urllib.request.urlretrieve(d['country'], 'logos/ufc_countries/' + d['country'].split('/')[-1].split('&')[0])
                    except:
                        pass
                    try:
                        if league == 'pga':
                            if d['photo'].split('/')[-1].split('&')[0] not in os.listdir('logos/pga_rank/'):
                                urllib.request.urlretrieve(d['photo'], 'logos/pga_rank/' + d['photo'].split('/')[-1].split('&')[0])
                        elif league == 'lpga':
                            if d['photo'].split('/')[-1] not in os.listdir('logos/lpga_rank/'):
                                urllib.request.urlretrieve(d['photo'], 'logos/lpga_rank/' + d['photo'].split('/')[-1])
                    except:
                        pass
                    try:
                        if league == 'pga':
                            logo_files.append(d['photo'].split('/')[-1].split('&')[0])
                        elif league == 'lpga':
                            logo_files.append(d['photo'].split('/')[-1])
                    except:
                        pass
                # drop cached logos that no longer appear in the rankings
                if league == 'pga':
                    for file in os.listdir('logos/pga_rank/'):
                        if file not in logo_files:
                            os.remove('logos/pga_rank/' + file)
                elif league == 'lpga':
                    for file in os.listdir('logos/lpga_rank/'):
                        if file not in logo_files:
                            os.remove('logos/lpga_rank/' + file)
            else:
                for d in all_data[i][league]:
                    team = {}
                    team['name'] = d['strTeam']
                    team['wins'] = d['intWin']
                    team['loss'] = d['intLoss']
                    team['draw'] = d['intDraw']
                    #team['played'] = d['intPlayed']
                    team['standing'] = d['intRank']
                    #team['points'] = d['intPoints']
                    teams.append(team)

            leagues_info[league.upper()] = teams

        all_settings['leagues'] = leagues_info
        with open("csv/league_tables.json", 'w+') as f:
            json.dump(all_settings, f)
    except:
        pass
        #logf = open('log.txt', "a")
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        #logf.write(str(e))
        #logf.write('. file: ' + fname)
        #logf.write('. line: ' + str(exc_tb.tb_lineno))
        #logf.write('. type: ' + str(exc_type))
        #logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        #logf.close()

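
# Illustrative sketch (not called anywhere): the updaters above and below build
# their query by appending 'LEAGUE,' per league and trimming the trailing comma
# with url[:-1]; ','.join() expresses the same thing in one step. The helper
# name is hypothetical and assumes the endpoint accepts the identical string.
def build_sports_url(base, leagues, api_key):
    return base + ','.join(leagues) + '&apiKey=' + api_key
# e.g. build_sports_url('https://.../ScriptsAPI/sports?stats=', ['NFL', 'NBA'], api_key)
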
def updatePLtime():

    f = open('csv/live_games.json')

    try:
        all_settings = json.load(f)
        f.close()

        try:
            for league in all_settings['leagues']:
                if league == 'PREMIERLEAGUE':
                    subprocess.run(["sudo", "pkill", "-f", "live_pl.py"], shell=False)
                    premierleague = subprocess.Popen(["python3", "live_pl.py"], shell=False)
        except:
            pass
    except:
        pass

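
# Illustrative sketch of the restart pattern the live-score code uses: kill any
# running copy of a helper script, then spawn a fresh one. The wrapper name is
# hypothetical; the pkill/Popen pair matches what updatePLtime above and the
# livescore branch below do with live_pl.py, live_nfl.py, etc.
def restart_live_script(script_name):
    subprocess.run(["sudo", "pkill", "-f", script_name], shell=False)
    return subprocess.Popen(["python3", script_name], shell=False)
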
def updateLeagueEvents(api_key, time, logf):

    url = 'https://bm7p954xoh.execute-api.us-east-2.amazonaws.com/default/ScriptsAPI/sports?{}='.format(time)

    try:
        if time == 'past':
            f = open('csv/past_games.json')
        elif time == 'upcoming':
            f = open('csv/upcoming_games.json')
        elif time == 'livescore':
            f = open('csv/live_games.json')
        all_settings = json.load(f)
        f.close()
    except:
        if time == 'past':
            all_settings = {"feature": "Sports (Past Games)", "speed2": "medium", "speed": "medium", "animation": "down", "title": True, "leagues": {"NFL": []}}
        elif time == 'upcoming':
            all_settings = {"feature": "Sports (Upcoming Games)", "speed": "medium", "speed2": "medium", "animation": "down", "title": True, "leagues": {"NFL": [{"date": "2021-11-22", "time": "01:20:00", "round": "11", "home_team": "Los Angeles Chargers", "home_score": "41", "away_team": "Pittsburgh Steelers", "away_score": "37"}]}}
        elif time == 'livescore':
            all_settings = {"feature": "Sports (Live Games)", "speed": "medium", "speed2": "medium", "animation": "down", "title": True, "leagues": {}}

    try:
        if time == 'livescore':
            try:
                leagues_info = {}
                for league in all_settings['leagues']:
                    events = []
                    if league == 'NFL':
                        subprocess.run(["sudo", "pkill", "-f", "live_nfl.py"], shell=False)
                        nfl = subprocess.Popen(["python3", "live_nfl.py"], shell=False)
                        events.append('Filled')
                        leagues_info[league.upper()] = events
                    if league == 'NBA':
                        subprocess.run(["sudo", "pkill", "-f", "live_nba.py"], shell=False)
                        nba = subprocess.Popen(["python3", "live_nba.py"], shell=False)
                        events.append('Filled')
                        leagues_info[league.upper()] = events
                    if league == 'NHL':
                        subprocess.run(["sudo", "pkill", "-f", "live_nhl.py"], shell=False)
                        nhl = subprocess.Popen(["python3", "live_nhl.py"], shell=False)
                        events.append('Filled')
                        leagues_info[league.upper()] = events
                    if league == 'MLB':
                        subprocess.run(["sudo", "pkill", "-f", "live_mlb.py"], shell=False)
                        mlb = subprocess.Popen(["python3", "live_mlb.py"], shell=False)
                        events.append('Filled')
                        leagues_info[league.upper()] = events
                    if league == 'PREMIERLEAGUE':
                        subprocess.run(["sudo", "pkill", "-f", "live_pl.py"], shell=False)
                        premierleague = subprocess.Popen(["python3", "live_pl.py"], shell=False)
                        events.append('Filled')
                        leagues_info[league.upper()] = events
                    if league == 'MLS':
                        subprocess.run(["sudo", "pkill", "-f", "live_mls.py"], shell=False)
                        mls = subprocess.Popen(["python3", "live_mls.py"], shell=False)
                        events.append('Filled')
                        leagues_info[league.upper()] = events

                all_settings['leagues'] = leagues_info

                with open('csv/live_games.json', 'w+') as f:
                    json.dump(all_settings, f)
            except:
                pass

        else:

            leagues = all_settings['leagues'].keys()
            leagues_info = {}

            for league in leagues:
                if league == 'PREMIERLEAGUE':
                    url += 'PREMIERLEAGUE,'
                else:
                    url += league + ','
            url = url[:-1] # remove the trailing comma
            url += '&apiKey=' + api_key

            r = requests.get(url)
            all_data = r.json()

            for league in all_data.keys():
                ten_or_fifteen = slice(None)
                events = []
                # golf and motorsport feeds are trimmed to the most recent events
                if (league == 'PGA') or (league == 'LPGA') or (league == 'PGA_EU') or (league == 'LIV') or (league == 'F1') or (league == 'NASCAR'):
                    if time == 'past':
                        ten_or_fifteen = slice(2)
                    else:
                        ten_or_fifteen = slice(3)
                else:
                    ten_or_fifteen = slice(None)

                if league == 'UFC':
                    event = all_data['UFC'][0]
                    events.append(event)
                    if time == 'upcoming':
                        try:
                            logo_files = []
                            for each in all_data['UFC'][0]['fights']:
                                try:
                                    if each['fighter1pic'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc/'):
                                        urllib.request.urlretrieve(each['fighter1pic'], 'logos/ufc/' + each['fighter1pic'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                try:
                                    if each['fighter2pic'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc/'):
                                        urllib.request.urlretrieve(each['fighter2pic'], 'logos/ufc/' + each['fighter2pic'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                try:
                                    logo_files.append(each['fighter2pic'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                try:
                                    logo_files.append(each['fighter1pic'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                # country flags
                                try:
                                    if each['fighter1country'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_countries/'):
                                        urllib.request.urlretrieve(each['fighter1country'], 'logos/ufc_countries/' + each['fighter1country'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                try:
                                    if each['fighter2country'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_countries/'):
                                        urllib.request.urlretrieve(each['fighter2country'], 'logos/ufc_countries/' + each['fighter2country'].split('/')[-1].split('&')[0])
                                except:
                                    pass

                            for file in os.listdir('logos/ufc/'):
                                if file not in logo_files:
                                    os.remove('logos/ufc/' + file)
                        except:
                            pass
                    elif time == 'past':
                        try:
                            logo_files = []
                            for each in all_data['UFC'][0]['fights']:
                                try:
                                    if each['fighter1pic'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_past/'):
                                        urllib.request.urlretrieve(each['fighter1pic'], 'logos/ufc_past/' + each['fighter1pic'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                try:
                                    if each['fighter2pic'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_past/'):
                                        urllib.request.urlretrieve(each['fighter2pic'], 'logos/ufc_past/' + each['fighter2pic'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                try:
                                    logo_files.append(each['fighter2pic'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                try:
                                    logo_files.append(each['fighter1pic'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                # country flags
                                try:
                                    if each['fighter1country'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_countries/'):
                                        urllib.request.urlretrieve(each['fighter1country'], 'logos/ufc_countries/' + each['fighter1country'].split('/')[-1].split('&')[0])
                                except:
                                    pass
                                try:
                                    if each['fighter2country'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_countries/'):
                                        urllib.request.urlretrieve(each['fighter2country'], 'logos/ufc_countries/' + each['fighter2country'].split('/')[-1].split('&')[0])
                                except:
                                    pass

                            for file in os.listdir('logos/ufc_past/'):
                                if file not in logo_files:
                                    os.remove('logos/ufc_past/' + file)
                        except:
                            pass
                else:
                    for d in all_data[league][ten_or_fifteen]:
                        event = {}
                        event['date'] = d['dateEvent']
                        try:
                            event['date2'] = d['dateEvent2']
                        except:
                            pass

                        if time == 'live':
                            event['progess'] = d['strProgress']
                            event['status'] = d['strStatus']
                        else:
                            if (league == 'PGA') or (league == 'LPGA') or (league == 'PGA_EU') or (league == 'LIV') or (league == 'F1') or (league == 'NASCAR'):
                                event['date'] = d['dateEvent']
                                try:
                                    event['date2'] = d['dateEvent2']
                                except:
                                    pass
                                try:
                                    event['total_yards'] = d['total_yards']
                                    event['shots_par'] = d['shots_par']
                                    event['purse'] = d['purse']
                                except:
                                    pass
                                event['event'] = d['strEvent'].replace("\u2019", "'")
                                event['venue'] = d['strVenue'].replace("\u2019", "'")
                                event['city'] = d['strCity'].replace("\u2019", "'")
                                event['country'] = d['strCountry']
                                event['season'] = d['strSeason']
                            else:
                                event['round'] = d['intRound']
                                event['time'] = d['strTime']
                                event['home_team'] = d['strHomeTeam']
                                event['away_team'] = d['strAwayTeam']

                        if time != 'upcoming':
                            if (league == 'PGA') or (league == 'LPGA') or (league == 'PGA_EU'):
                                # event['golf_standings'] = d['strResult']
                                event['golf_rankings'] = d['player_results']
                                for player in event['golf_rankings']:
                                    try:
                                        if player['country'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_countries/'):
                                            urllib.request.urlretrieve(player['country'], 'logos/ufc_countries/' + player['country'].split('/')[-1].split('&')[0])
                                    except:
                                        pass

                                # rank = ['n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9', 'n10', 'T1', 'T2', 'T3', 'T4', 'T5',
                                #         'T6', 'T7', 'T8', 'T9', 'T10']
                                # def convert(string):
                                #     string = repr(string).replace('/', '')
                                #     li = list(string.split('\\'))
                                #     return li

                                # str3 = convert(event['golf_standings'])

                                # players = []

                                # for each in str3:
                                #     each = each.replace('nT', 'T', 1)
                                #     if each[:2] in rank:
                                #         try:
                                #             first_space = each.find(' ', 1)
                                #             second_space = each.find(' ', 4)
                                #             first_name = each[first_space:second_space].lstrip()
                                #             initial = first_name[0] + '.'
                                #             each = each.replace(first_name, initial)
                                #         except:
                                #             pass
                                #         interator = each.find('-')
                                #         if interator < 0:
                                #             interator = 0
                                #         interator2 = each[interator:interator + 3]
                                #         result = each.split(interator2, 1)[0] + interator2
                                #         players.append(result.rstrip())

                                # event['golf_standings'] = players

                            elif (league == 'LIV'):
                                # event['golf_standings'] = d['strResult']
                                event['golf_rankings'] = d['player_results']
                                for player in event['golf_rankings']:
                                    try:
                                        if player['country'].split('/')[-1].split('&')[0] not in os.listdir('logos/ufc_countries/'):
                                            urllib.request.urlretrieve(player['country'], 'logos/ufc_countries/' + player['country'].split('/')[-1].split('&')[0])
                                    except:
                                        pass

                                # rank = ['n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9', 'n10', 'T1', 'T2', 'T3', 'T4', 'T5',
                                #         'T6', 'T7', 'T8', 'T9', 'T10']
                                # def convert(string):
                                #     string = repr(string).replace('/', '')
                                #     li = list(string.split('\\'))
                                #     return li

                                # try:
                                #     str3 = convert(event['golf_standings'].split('--------------------------------------')[0])
                                #     strTeams = convert(event['golf_standings'].split('--------------------------------------')[1])
                                # except:
                                #     pass

                                # players = []
                                # teams = []

                                # try:
                                #     for each in str3:
                                #         each = each.replace('nT', 'T', 1)
                                #         if each[:2] in rank:
                                #             try:
                                #                 first_space = each.find(' ', 1)
                                #                 second_space = each.find(' ', 4)
                                #                 first_name = each[first_space:second_space].lstrip()
                                #                 initial = first_name[0] + '.'
                                #                 each = each.replace(first_name, initial)
                                #             except:
                                #                 pass
                                #             interator = each.find('-')
                                #             if interator < 0:
                                #                 interator = 0
                                #             interator2 = each[interator:interator + 3]
                                #             result = each.split(interator2, 1)[0] + interator2
                                #             players.append(result.rstrip())

                                #     for each in strTeams:
                                #         each = each.replace('nT', 'T', 1)
                                #         if each[:2] in rank:
                                #             each = each.split('GC')
                                #             score = each[1].rfind(' ')
                                #             score2 = each[1][score:score+4]
                                #             each2 = each[0] + score2
                                #             teams.append(each2)
                                # except:
                                #     pass

                                # event['golf_standings'] = [players] + [teams]

                            else:
                                event['away_score'] = d['intAwayScore']
                                event['home_score'] = d['intHomeScore']

                        events.append(event)

                    leagues_info[league.upper()] = events

            all_settings['leagues'] = leagues_info

            with open("csv/{}_games.json".format(time), 'w+') as f:
                json.dump(all_settings, f)
    except:
        pass

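
# Illustrative sketch (not called anywhere): the ten_or_fifteen slice above
# trims golf/motorsport feeds to the 2 or 3 most recent events, while
# slice(None) keeps everything, since data[slice(n)] == data[:n].
def trim_events(events, keep=None):
    return events[slice(keep)]
# e.g. trim_events([1, 2, 3, 4], 2) -> [1, 2]; trim_events([1, 2, 3]) -> [1, 2, 3]
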
def updateSports(api_key, logf):
    #read user settings to decide which sports to update

    updateLeagueTables(api_key, logf)

    updateLeagueEvents(api_key, 'livescore', logf)
    updateLeagueEvents(api_key, 'past', logf)
    updateLeagueEvents(api_key, 'upcoming', logf)

def checkStocks(last_update, update_frequency):
    NY_time = datetime.now(NY_zone).replace(tzinfo=None)
    opening = NY_time.replace(hour=9, minute=30, second=0, microsecond=0).replace(tzinfo=None)
    closing = NY_time.replace(hour=16, minute=5, second=0, microsecond=0).replace(tzinfo=None)

    f = open('csv/stocks_settings.json', 'r')
    all_stocks_settings = json.load(f)
    f.close()
    stock_info = all_stocks_settings['symbols']
    symbols = list(stock_info.keys())

    updated = False

    diff = (NY_time - last_update).total_seconds()/60 #minutes
    if opening < NY_time < closing and datetime.today().weekday() < 5: # market hours: update in real time
        if diff >= update_frequency:
            updated = True
    elif emptyInfo(symbols, stock_info): # update if any stock has no info yet
        updated = True
    else:
        # update if the last update was before the previous day's closing
        yday_closing = closing - dt.timedelta(days=1)
        yday_str = yday_closing.strftime("%d/%m/%Y %H:%M:%S")
        yday_closing = datetime.strptime(yday_str, "%d/%m/%Y %H:%M:%S")

        if last_update < yday_closing:
            updated = True

    return updated

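
# Illustrative sketch of the staleness arithmetic checkStocks and the main loop
# rely on: the gap between two naive datetimes in minutes, compared against an
# entry from update_frequencies. The helper name is hypothetical.
def minutes_since(then, now):
    return (now - then).total_seconds() / 60
# e.g. an update is due when minutes_since(last_update, NY_time) >= update_frequency
# while the market is open.
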
def updateAll(api_key, weather_key, logf):
    updateStocks(api_key, logf)
    updateCrypto(api_key, logf)
    updateForex(api_key, logf)
    updateNews(api_key, logf)
    updateSports(api_key, logf)

    if weather_key:
        updateWeather(weather_key, logf)


past_espn_time = True
past_pl_time = True

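
# Illustrative sketch of the worker pattern the main loop below uses: each
# refresh runs in its own multiprocessing.Process, and finished workers are
# joined and dropped from a tracking list. Sweeping over a copy ([:]) is what
# makes removal during iteration safe. The helper name is hypothetical.
def reap_finished(processes):
    for process in processes[:]:
        if not process.is_alive():
            process.join()
            processes.remove(process)
    return processes
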
if __name__ == '__main__':
    logf = open("log.txt", "a")

    t = time.time()

    update_frequencies = {'stocks':2, 'crypto':7, 'forex':60, 'news':120, 'weather': 120, 'sports': 1440, 'commodities': 15, 'indices': 15, 'movies': 1440, 'ipo': 1440, 'prepost': 15, 'economic': 15, 'jokes': 15, 'market': 5, 'quotes': 15, 'globalstocks': 15} #minutes

    NY_zone = pytz.timezone('America/New_York')
    CET_zone = pytz.timezone('EST') # note: named CET_zone but pinned to EST

    NY_time = datetime.now(NY_zone)
    CET_time = datetime.now(CET_zone)

    NY_str = NY_time.strftime("%d/%m/%Y %H:%M:%S")
    CET_str = CET_time.strftime("%d/%m/%Y %H:%M:%S")

    #f = open('csv/last_updates.json', 'w+')
    #update_times = {'stocks':NY_str, 'crypto':NY_str, 'news':NY_str, 'weather': NY_str, 'forex': CET_str} # all in NY time apart from forex in CET
    #json.dump(update_times, f)
    #f.close()

    f = open('api_keys.txt')
    api_keys = f.readlines()
    api_key = api_keys[0].strip()

    try:
        weather_key = api_keys[1].strip()
    except Exception as e:
        weather_key = False
        # logf = open('log.txt', "a")
        # exc_type, exc_obj, exc_tb = sys.exc_info()
        # fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        # logf.write(str(e))
        # logf.write('. file: ' + fname)
        # logf.write('. line: ' + str(exc_tb.tb_lineno))
        # logf.write('. type: ' + str(exc_type))
        # logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        # logf.close()

    try:
        movie_key = open('movie_api_key.txt').readlines()[0]
    except Exception as e:
        movie_key = False

    try:
        ipo_key = open('ipo_api_key.txt').readlines()[0]
    except Exception as e:
        ipo_key = False

    t = time.time()
    update_processes = []

    # try:
    #     time.sleep(60)
    #     f = open('csv/last_updates.json', 'r')
    #     last_updates = json.load(f)
    #     f.close()
    #     last_updates['stocks']['force'] = True
    #     #last_updates['weather']['force'] = True
    #     f = open('csv/last_updates.json', 'w')
    #     json.dump(last_updates, f)
    #     f.close()
    # except:
    #     pass

    try:
        while True:
            try:
                f = open('csv/last_updates.json', 'r')
                last_updates = json.load(f)
                f.close()
            except:
                last_updates = {"scheduler":{"force": False}, "stocks": {"time": "06/03/2022 04:12:09", "force": True}, "crypto": {"time": "06/03/2022 04:10:39", "force": True},
                                "news": {"time": "06/03/2022 04:07:09", "force": True}, "weather": {"time": "06/03/2022 04:08:20", "force": True},
                                "forex": {"time": "06/03/2022 03:54:02", "force": True}, "sports_l": {"time": "06/03/2022 04:10:09", "force": True},
                                "sports_p": {"time": "06/03/2022 04:10:09", "force": True},
                                "sports_u": {"time": "06/03/2022 04:10:09", "force": True}, "sports_t": {"time": "06/03/2022 04:10:09", "force": True}, "commodities": {"time": "06/03/2022 04:10:09", "force": True}, "indices": {"time": "06/03/2022 04:10:09", "force": True}, "movies": {"time": "06/03/2022 04:10:09", "force": True}, "ipo": {"time": "06/03/2022 04:10:09", "force": True},
                                "prepost": {"time": "06/03/2022 04:10:09", "force": True}, "economic": {"time": "06/03/2022 04:10:09", "force": True}, "jokes": {"time": "06/03/2022 04:10:09", "force": True}, "market": {"time": "06/03/2022 04:10:09", "force": True}, "sector": {"time": "06/03/2022 04:10:09", "force": True}, "quotes": {"time": "06/03/2022 04:10:09", "force": True}, "globalstocks": {"time": "06/03/2022 04:10:09", "force": True}}

            try:
                if last_updates['scheduler']['force']:
                    try:
                        f = open('csv/scheduler.json', 'r')
                        schedules = json.load(f)
                        f.close()

                        shutdown_schedule_hour = schedules['shutdown']['hour']
                        shutdown_schedule_minute = schedules['shutdown']['minute']

                        reboot_schedule_hour = schedules['reboot']['hour']
                        reboot_schedule_minute = schedules['reboot']['minute']

                        timezone = schedules['timezone']
                        shutdown_enabled = schedules['shutdown']['enabled']
                        reboot_enabled = schedules['reboot']['enabled']
                    except:
                        shutdown_schedule_hour = "00"
                        shutdown_schedule_minute = "00"

                        reboot_schedule_hour = "00"
                        reboot_schedule_minute = "00"

                        timezone = "GMT"
                        shutdown_enabled = False
                        reboot_enabled = False
                    last_updates['scheduler']['force'] = False
            except:
                pass

            #SHUTDOWN
            try:
                if datetime.now(pytz.timezone(timezone)).strftime("%H:%M") == shutdown_schedule_hour+':'+shutdown_schedule_minute and shutdown_enabled:
                    os.system('sudo shutdown now')
            except:
                pass

            #REBOOT
            try:
                if datetime.now(pytz.timezone(timezone)).strftime("%H:%M") == reboot_schedule_hour+':'+reboot_schedule_minute and reboot_enabled:
                    os.system('sudo reboot')
            except:
                pass

            NY_time = datetime.now(NY_zone).replace(tzinfo=None)

            #msg = getInput()

            #stocks
            stock_time = datetime.strptime(last_updates['stocks']['time'], "%d/%m/%Y %H:%M:%S")
            stock_frequency = update_frequencies['stocks']
            diff = (NY_time - stock_time).total_seconds()/60 #minutes

            opening = NY_time.replace(hour=9, minute=30, second=0, microsecond=0).replace(tzinfo=None)
            closing = NY_time.replace(hour=16, minute=5, second=0, microsecond=0).replace(tzinfo=None)
            stock_open = opening < NY_time < closing and datetime.today().weekday() <= 4

            if last_updates['stocks']['force'] or (diff >= update_frequencies['stocks'] and stock_open):# or msg == 's':
                stock_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['stocks']['time'] = stock_time
                last_updates['stocks']['force'] = False
                #updateStocks(api_key)
                update_process = Process(target = updateStocks, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)
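
            # The feature blocks below follow the same template as stocks above:
            # parse the feature's last-update stamp, measure the gap in minutes,
            # and spawn a worker Process when the gap reaches
            # update_frequencies[feature] or when the feature's 'force' flag has
            # been set externally in csv/last_updates.json.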

            #prepost
            NY_time1 = datetime.now(NY_zone).replace(tzinfo=None)
            NY_time2 = datetime.now(NY_zone).replace(tzinfo=None)

            preopen = NY_time1.replace(hour=4, minute=0, second=0, microsecond=0).replace(tzinfo=None)
            preclose = NY_time1.replace(hour=9, minute=30, second=0, microsecond=0).replace(tzinfo=None)

            postopen = NY_time2.replace(hour=16, minute=0, second=0, microsecond=0).replace(tzinfo=None)
            postclose = NY_time2.replace(hour=20, minute=20, second=0, microsecond=0).replace(tzinfo=None)

            prepost_frequency = update_frequencies['prepost']
            prepost_time = datetime.strptime(last_updates['prepost']['time'], "%d/%m/%Y %H:%M:%S")

            pre_open = preopen < NY_time1 < preclose and NY_time1.weekday() <= 4
            post_open = postopen < NY_time2 < postclose and NY_time2.weekday() <= 4

            diff1 = (NY_time1 - prepost_time).total_seconds()/60 #minutes
            diff2 = (NY_time2 - prepost_time).total_seconds()/60 #minutes

            if (last_updates['prepost']['force']) or (diff1 >= update_frequencies['prepost'] and pre_open) or (diff2 >= update_frequencies['prepost'] and post_open):
                prepost_time = NY_time1.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['prepost']['time'] = prepost_time
                last_updates['prepost']['force'] = False
                update_process = Process(target = updateStocksPrePost, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # crypto
            crypto_time = datetime.strptime(last_updates['crypto']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - crypto_time).total_seconds()/60 #minutes

            if last_updates['crypto']['force'] or diff >= update_frequencies['crypto']:# or msg == 'c':
                crypto_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                #updateCrypto(api_key, logf)
                last_updates['crypto']['time'] = crypto_time
                last_updates['crypto']['force'] = False
                update_process = Process(target = updateCrypto, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # commodities
            commodities_time = datetime.strptime(last_updates['commodities']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - commodities_time).total_seconds()/60 #minutes

            if last_updates['commodities']['force'] or diff >= update_frequencies['commodities']:# or msg == 'c':
                commodities_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['commodities']['time'] = commodities_time
                last_updates['commodities']['force'] = False
                update_process = Process(target = updateCommodities, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # economic calendar
            economic_time = datetime.strptime(last_updates['economic']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - economic_time).total_seconds()/60 #minutes

            if last_updates['economic']['force'] or diff >= update_frequencies['economic']:# or msg == 'c':
                economic_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['economic']['time'] = economic_time
                last_updates['economic']['force'] = False
                update_process = Process(target = updateEconomic, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # market (also refreshes the sector data)
            market_time = datetime.strptime(last_updates['market']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - market_time).total_seconds()/60 #minutes

            opening = NY_time.replace(hour=9, minute=30, second=0, microsecond=0).replace(tzinfo=None)
            closing = NY_time.replace(hour=16, minute=20, second=0, microsecond=0).replace(tzinfo=None)
            stock_open = opening < NY_time < closing and datetime.today().weekday() <= 4

            if last_updates['market']['force'] or (diff >= update_frequencies['market'] and stock_open):# or msg == 'c':
                market_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['market']['time'] = market_time
                last_updates['market']['force'] = False
                last_updates['sector']['time'] = market_time
                last_updates['sector']['force'] = False
                update_process = Process(target = updateMarket, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # jokes
            jokes_time = datetime.strptime(last_updates['jokes']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - jokes_time).total_seconds()/60 #minutes

            if last_updates['jokes']['force'] or diff >= update_frequencies['jokes']:# or msg == 'c':
                jokes_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['jokes']['time'] = jokes_time
                last_updates['jokes']['force'] = False
                update_process = Process(target = updateJokes, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # quotes
            quotes_time = datetime.strptime(last_updates['quotes']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - quotes_time).total_seconds()/60 #minutes

            if last_updates['quotes']['force'] or diff >= update_frequencies['quotes']:# or msg == 'c':
                quotes_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['quotes']['time'] = quotes_time
                last_updates['quotes']['force'] = False
                update_process = Process(target = updateQuotes, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # global stocks
            globalstocks_time = datetime.strptime(last_updates['globalstocks']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - globalstocks_time).total_seconds()/60 #minutes

            if last_updates['globalstocks']['force'] or diff >= update_frequencies['globalstocks']:# or msg == 'c':
                globalstocks_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['globalstocks']['time'] = globalstocks_time
                last_updates['globalstocks']['force'] = False
                update_process = Process(target = updateGlobalStocks, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # indices
            indices_time = datetime.strptime(last_updates['indices']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - indices_time).total_seconds()/60 #minutes

            if last_updates['indices']['force'] or diff >= update_frequencies['indices']:# or msg == 'c':
                indices_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['indices']['time'] = indices_time
                last_updates['indices']['force'] = False
                update_process = Process(target = updateIndices, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # movies
            movies_time = datetime.strptime(last_updates['movies']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - movies_time).total_seconds()/60 #minutes

            if last_updates['movies']['force'] or diff >= update_frequencies['movies']:
                movies_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['movies']['time'] = movies_time
                last_updates['movies']['force'] = False
                update_process = Process(target = updateMovies, args = (movie_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # ipos
            ipo_time = datetime.strptime(last_updates['ipo']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - ipo_time).total_seconds()/60 #minutes

            if last_updates['ipo']['force'] or diff >= update_frequencies['ipo']:
                ipo_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['ipo']['time'] = ipo_time
                last_updates['ipo']['force'] = False
                update_process = Process(target = updateIpo, args = (ipo_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # weather
            weather_time = datetime.strptime(last_updates['weather']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - weather_time).total_seconds()/60 #minutes

            if last_updates['weather']['force'] or diff >= update_frequencies['weather']:# or msg == 'w':
                weather_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                #updateWeather(weather_key)
                last_updates['weather']['time'] = weather_time
                last_updates['weather']['force'] = False
                update_process = Process(target = updateWeather, args = (weather_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # news
            news_time = datetime.strptime(last_updates['news']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - news_time).total_seconds()/60 #minutes

            if last_updates['news']['force'] or diff >= update_frequencies['news']:# or msg == 'n':
                news_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                #updateNews(api_key)
                last_updates['news']['time'] = news_time
                last_updates['news']['force'] = False
                update_process = Process(target = updateNews, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # sports upcoming
            sports_time = datetime.strptime(last_updates['sports_u']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - sports_time).total_seconds()/60 #minutes
            if last_updates['sports_u']['force'] or diff >= update_frequencies['sports']:# or msg == 'S':
                sports_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                #updateSports(api_key)
                last_updates['sports_u']['time'] = sports_time
                last_updates['sports_u']['force'] = False
                update_process = Process(target = updateLeagueEvents, args = (api_key,'upcoming',logf))
                update_process.start()
                update_processes.append(update_process)

            # sports live: refresh once per day when the ESPN cutoff time is reached
            # (zero-padded "HH:MM" strings compare correctly as text)
            sports_time = datetime.strptime(last_updates['sports_l']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            espn_time = "17:00Z"
            espn_time_est = "12:00"
            # if datetime.now(pytz.utc).strftime("%H:%MZ") < espn_time:
            if datetime.now(pytz.timezone('America/New_York')).strftime("%H:%M") < espn_time_est:
                past_espn_time = True
            if last_updates['sports_l']['force'] or (datetime.now(pytz.timezone('America/New_York')).strftime("%H:%M") >= espn_time_est and past_espn_time):# or msg == 'S':
            # if last_updates['sports_l']['force'] or (datetime.now(pytz.utc).strftime("%H:%MZ") >= espn_time and past_espn_time):# or msg == 'S':
                sports_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['sports_l']['time'] = sports_time
                last_updates['sports_l']['force'] = False
                past_espn_time = False
                update_process = Process(target = updateLeagueEvents, args = (api_key, 'livescore',logf))
                update_process.start()
                update_processes.append(update_process)

            #sports live (premier league)
            pl_time = "12:00Z"
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            sports_time = datetime.strptime(last_updates['sports_l']['time'], "%d/%m/%Y %H:%M:%S")
            if datetime.now(pytz.utc).strftime("%H:%MZ") < pl_time:
                past_pl_time = True
            if datetime.now(pytz.utc).strftime("%H:%MZ") >= pl_time and past_pl_time:# or msg == 'S':
                sports_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                past_pl_time = False
                last_updates['sports_l']['time'] = sports_time
                last_updates['sports_l']['force'] = False
                update_process = Process(target = updatePLtime)
                update_process.start()
                update_processes.append(update_process)

            # sports past
            sports_time = datetime.strptime(last_updates['sports_p']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - sports_time).total_seconds()/60 #minutes
            if last_updates['sports_p']['force'] or diff >= update_frequencies['sports']:# or msg == 'S':
                sports_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                #updateSports(api_key)
                last_updates['sports_p']['time'] = sports_time
                last_updates['sports_p']['force'] = False
                update_process = Process(target = updateLeagueEvents, args = (api_key,'past',logf))
                update_process.start()
                update_processes.append(update_process)

            # sports table
            sports_time = datetime.strptime(last_updates['sports_t']['time'], "%d/%m/%Y %H:%M:%S")
            NY_time = datetime.now(NY_zone).replace(tzinfo=None)
            diff = (NY_time - sports_time).total_seconds()/60 #minutes
            if last_updates['sports_t']['force'] or diff >= update_frequencies['sports']:# or msg == 'S':
                sports_time = NY_time.strftime("%d/%m/%Y %H:%M:%S")
                #updateSports(api_key)
                last_updates['sports_t']['time'] = sports_time
                last_updates['sports_t']['force'] = False
                update_process = Process(target = updateLeagueTables, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            # forex updates once every 24 hours at 17:00 (in CET_zone, which is pinned to EST above)
            # update if the last update was before the previous day's closing
            forex_time = datetime.strptime(last_updates['forex']['time'], "%d/%m/%Y %H:%M:%S")
            CET_time = datetime.now(CET_zone).replace(tzinfo=None)
            yday_update = (CET_time.replace(hour=17, minute=00, second=0, microsecond=0) - dt.timedelta(days=1)).replace(tzinfo=None)
            diff = (CET_time.replace(tzinfo=None) - forex_time).total_seconds()/60

            opening = CET_time.replace(hour=17, minute=0, second=0, microsecond=0).replace(tzinfo=None)

            # the forex market is open between 5pm Sunday and 5pm Friday; update hourly in that window
            forex_open = datetime.today().weekday() < 4 or (datetime.today().weekday() == 6 and CET_time > opening) or (datetime.today().weekday() == 4 and CET_time < opening)

            if last_updates['forex']['force'] or (diff >= update_frequencies['forex'] and forex_open):# or msg == 'f':
                forex_time = CET_time.strftime("%d/%m/%Y %H:%M:%S")
                last_updates['forex']['time'] = forex_time
                last_updates['forex']['force'] = False
                #updateForex(api_key)
                update_process = Process(target = updateForex, args = (api_key,logf))
                update_process.start()
                update_processes.append(update_process)

            f = open('csv/last_updates.json', 'w+')
            json.dump(last_updates, f)
            f.close()

            # sweep over a copy of the list so removing finished workers is safe
            for process in update_processes[:]:
                if not process.is_alive():
                    process.join()
                    process.terminate()
                    update_processes.remove(process)

            time.sleep(10)

    except:
        pass
        # logf = open('log.txt', "a")
        # exc_type, exc_obj, exc_tb = sys.exc_info()
        # fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        # logf.write(str(e))
        # logf.write('. file: ' + fname)
        # logf.write('. line: ' + str(exc_tb.tb_lineno))
        # logf.write('. type: ' + str(exc_type))
        # logf.write('\n ' + "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])))
        # logf.close()