Untitled
unknown
plain_text
a year ago
7.1 kB
9
Indexable
import os, sys, csv, json
import argparse
from datetime import datetime, timedelta
import sqlalchemy
import pyodbc
def read_connection_token(token):
    """Load database connection details for *token*.

    Reads /var/connections/<token>.json and returns a
    (host, username, password, name) tuple from its fields.
    """
    token_path = os.path.join("/", "var", "connections", token + ".json")
    with open(token_path) as fh:
        details = json.load(fh)
    return (details["host"], details["username"],
            details["password"], details["name"])
# Full dump of the A2A research-interaction records. The column order here
# drives both the positional indexing in the main block (record[2] =
# ResourceProvider, record[10] = Value, record[14] = Attendee) and the
# column order written to a2data.csv, so it must stay in sync with
# a2_data_headers below.
a2_query = """
SELECT
Ticker, Issuer, ResourceProvider, StartDate, EndDate, StartTime,
EndTime, Location, Region, Format, Value, Currency, Title, AggregatedStatus,
Attendee, Status, Note, Team, CreatedAt, LastUpdate, CreatedBy,
LastUpdateBy, InOffice, AllDay, CreatedByIngestor, Id
FROM A2ARecords
"""
# CSV header row for a2data.csv: the 26 A2ARecords columns in the exact order
# selected by a2_query, plus the two columns appended per output row
# (SplitTeam, Parent Broker).
# Fix: header 24 was "CreatedByInvestor", but the query selects
# "CreatedByIngestor" — the headers map positionally onto the select list.
a2_data_headers = [
    "Ticker", "Issuer", "ResourceProvider", "StartDate", "EndDate", "StartTime",
    "EndTime", "Location", "Region", "Format", "Value", "Currency", "Title", "AggregatedStatus",
    "Attendee", "Status", "Note", "Team", "CreatedAt", "LastUpdate", "CreatedBy",
    "LastUpdateBy", "InOffice", "AllDay", "CreatedByIngestor", "Id", "SplitTeam", "Parent Broker"]
# Legacy commission extract from 2022-01-01 through today. NOTE(review): the
# end date is frozen at module import time, independent of --endDate.
commission_query = ("EXEC jg.GetCommissionData_Legacy '20220101', '"
                    + datetime.now().strftime('%Y%m%d') + "'")
# CSV header row for commissions.csv: the stored procedure's 16 columns plus
# the two columns appended per output row (Split Team, Research Team).
commission_headers = [
    "Account", "Broker Code", "Parent Broker", "Broker Name", "Contra Broker",
    "SecType", "Number of Trades", "Total Commission", "Shares", "t_tradedate",
    "Month", "Year", "Week", "Week date", "Category", "Category Sort", "Split Team",
    "Research Team",
]
# Per-attendee count of this calendar year's unreconciled A2A records that
# require review and carry a Value above 750, highest counts first.
# Streamed straight into recdata.csv by the main block.
rec_query = """
SELECT Attendee, COUNT(*) AS Unreconciled
FROM dbo.A2ARecords
WHERE Status = 'None'
AND (BuysideStatus != 'Canceled' OR BuysideStatus IS NULL)
AND IsReviewRequired = 1
AND StartDate >= DATEFROMPARTS(YEAR(GETDATE()), 1, 1)
AND [Value] > 750
GROUP BY Attendee
ORDER BY COUNT(*) DESC
"""
rec_headers = ["Attendee","Unreconciled"]
def load_commission_splits(split_path):
    """Load the account -> {team: weight} commission split table.

    The CSV has a header row followed by (name, team, weighting, account)
    rows. Rows whose account is "-" are unmapped placeholders and skipped.

    Returns a dict mapping account -> {team: float weight}.
    """
    split_pct = dict()
    with open(split_path) as handle:
        reader = csv.reader(handle)
        next(reader, None)  # skip header; tolerate an empty file
        for line in reader:
            # Strip whitespace from every field, matching the behaviour of
            # load_research_splits so both tables tolerate padded cells.
            [name, team, weighting, account] = [x.strip() for x in line]
            if account == "-":
                continue  # placeholder row, no account to attribute
            split_pct.setdefault(account, dict())[team] = float(weighting)
    return split_pct
def load_research_splits(split_path):
    """Load the attendee -> {team: weight} research split table.

    The CSV has a header row followed by (name, team, weighting) rows;
    fields are whitespace-stripped before use.

    Returns a dict mapping attendee name -> {team: float weight}.
    """
    splits = dict()
    with open(split_path) as fh:
        rows = csv.reader(fh)
        next(rows)  # header row
        for row in rows:
            name, team, weighting = (field.strip() for field in row)
            splits.setdefault(name, dict())[team] = float(weighting)
    return splits
def load_commission_map(map_path):
    """Load the broker-name mapping used to join the two data sets.

    The CSV has a header row followed by (parent_broker, name) rows;
    fields are whitespace-stripped before use.

    Returns (forward, backward): parent broker -> research name, and
    research name -> parent broker.
    """
    forward = dict()
    backward = dict()
    with open(map_path) as fh:
        rows = csv.reader(fh)
        next(rows)  # header row
        for row in rows:
            parent_broker, research_name = (cell.strip() for cell in row)
            forward[parent_broker] = research_name
            backward[research_name] = parent_broker
    return (forward, backward)
if __name__ == "__main__":
    # Build the free-rider commission/research report: split A2A record
    # values and trade commissions across teams, then write three CSVs into
    # the output directory.
    parser = argparse.ArgumentParser(
        description='App for building the free rider commission/research report')
    # required=True so a missing flag fails with a clear argparse error
    # instead of a TypeError deep in the file-loading code.
    parser.add_argument('--commission_splits', type=str, required=True,
                        help="CSV mapping accounts to per-team commission weights")
    parser.add_argument('--research_splits', type=str, required=True,
                        help="CSV mapping attendees to per-team research weights")
    parser.add_argument('--broker_mapping', type=str, required=True,
                        help="the mapping between commission brokers and research names")
    parser.add_argument('--output', type=str, required=True,
                        help="the output directory")
    parser.add_argument('--startDate', type=str,
                        default=datetime(datetime.now().year, 1, 1).strftime("%Y%m%d"),
                        help="inclusive start date, YYYYMMDD (default: Jan 1 this year)")
    parser.add_argument('--endDate', type=str,
                        default=datetime.now().strftime("%Y%m%d"),
                        help="inclusive end date, YYYYMMDD (default: today)")
    args = parser.parse_args()

    os.makedirs(args.output, exist_ok=True)

    commission_splits = load_commission_splits(args.commission_splits)
    research_splits = load_research_splits(args.research_splits)
    (commission_mapping, broker_mapping) = load_commission_map(args.broker_mapping)

    # One connection-string template for all three queries; only the
    # database name differs.
    conn_template = ('server=azjgmikdb01.jgoldman.net;database=%s;port=1433;'
                     'Trusted_Connection=yes;'
                     'DRIVER={ODBC Driver 18 for SQL Server};Encrypt=No')

    def _write_csv(path, headers, rows, strip_newlines=False):
        """Write headers + rows to *path*; optionally flatten embedded
        newlines so each record stays on one CSV line."""
        with open(path, "w", encoding='utf8', newline='') as w_handle:
            writer = csv.writer(w_handle)
            writer.writerow(headers)
            for line in rows:
                if strip_newlines:
                    line = [str(x).replace("\n", "") for x in line]
                writer.writerow(line)

    # --- A2A records: split each record's Value across the attendee's
    # research teams and tag each split row with the parent broker. ---
    output_a2_data = []
    a2a_connection = pyodbc.connect(conn_template % 'Risk')
    try:
        a2_data_raw = a2a_connection.execute(a2_query)
        for record in a2_data_raw:
            attendee = record[14]          # Attendee
            value = record[10]             # Value
            resource_provider = record[2]  # ResourceProvider
            start_date = str(record[3]).replace("-", "")  # StartDate -> YYYYMMDD
            team_splits = research_splits.get(attendee, dict())
            # String comparison works because all dates are YYYYMMDD.
            if args.startDate <= start_date <= args.endDate:
                for team, pct in team_splits.items():
                    # Legacy rows may carry the literal string "NULL"; pass
                    # those (and empty values) through unweighted.
                    if value and value != "NULL":
                        amend_value = pct * float(value)
                    else:
                        amend_value = value
                    copied_record = list(record)
                    copied_record[10] = amend_value
                    copied_record.append(team)
                    copied_record.append(broker_mapping.get(resource_provider, "Other"))
                    output_a2_data.append(copied_record)
        a2_data_raw.close()
    finally:
        a2a_connection.close()  # was leaked before

    # --- Commission records: split each trade's commission across the
    # account's teams and tag each split row with the research name. ---
    output_commissions_data = []
    commission_connection = pyodbc.connect(conn_template % 'JGoldmanCore')
    try:
        commission_data_raw = commission_connection.execute(commission_query)
        for record in commission_data_raw:
            account = record[0]       # Account
            p_broker = record[2]      # Parent Broker
            trade_date = str(record[9]).split(' ')[0].replace('-', '')  # t_tradedate
            commission = float(record[7])  # Total Commission
            team_splits = commission_splits.get(account, dict())
            if args.startDate <= trade_date <= args.endDate:
                for team, pct in team_splits.items():
                    copied_record = list(record)
                    copied_record[7] = pct * commission
                    copied_record.append(team)
                    copied_record.append(commission_mapping.get(p_broker, "Other"))
                    output_commissions_data.append(copied_record)
    finally:
        commission_connection.close()  # was leaked before

    _write_csv(os.path.join(args.output, "a2data.csv"),
               a2_data_headers, output_a2_data, strip_newlines=True)
    # commissions.csv now written with utf8 encoding like the other outputs.
    _write_csv(os.path.join(args.output, "commissions.csv"),
               commission_headers, output_commissions_data)

    # --- Unreconciled-record counts, streamed straight to CSV. ---
    rec_connection = pyodbc.connect(conn_template % 'Risk')
    try:
        rec_data_raw = rec_connection.execute(rec_query)
        _write_csv(os.path.join(args.output, "recdata.csv"),
                   rec_headers, rec_data_raw, strip_newlines=True)
    finally:
        rec_connection.close()  # was leaked before
Editor is loading...
Leave a Comment