# Momentum-strategy parameter sweep script.
# (Paste-site metadata header removed so the file parses as Python.)
from itertools import product, combinations
import pandas as pd
import numpy as np
def generate_internal_rebalance_ratios(n, ratio_type='default'):
    """
    Generate an internal rebalance ratio list of length n.

    Every list is hand-defined, ordered from the largest positive tilt down
    to the largest negative one, and sums to zero (longs funded by shorts).

    Parameters:
        n (int): Number of assets (2 to 10).
        ratio_type (str): 'default', 'equal', 'gradual', or 'top_heavy'.

    Returns:
        list: Internal rebalance ratios of length n.

    Raises:
        ValueError: If n is outside [2, 10] or ratio_type is unknown.
    """
    if n < 2 or n > 10:
        raise ValueError("Number of assets must be between 2 and 10.")
    ratios = {
        # Entries for n=2 and n=3 were previously missing, so those sizes
        # raised KeyError despite the documented 2-10 range; they are filled
        # in below following each scheme's symmetric, zero-sum pattern.
        'default': {
            2: [0.7, -0.7],
            3: [0.7, 0, -0.7],
            4: [0.7, 0.3, -0.3, -0.7],
            5: [0.7, 0.3, 0, -0.3, -0.7],
            6: [0.7, 0.2, 0.1, -0.1, -0.2, -0.7],
            7: [0.7, 0.2, 0.1, 0, -0.1, -0.2, -0.7],
            8: [0.7, 0.2, 0.1, 0, 0, -0.1, -0.2, -0.7],
            9: [0.7, 0.2, 0.1, 0, 0, 0, -0.1, -0.2, -0.7],
            10: [0.7, 0.2, 0.1, 0, 0, 0, 0, -0.1, -0.2, -0.7]
        },
        'equal': {
            2: [0.5, -0.5],
            3: [0.5, 0, -0.5],
            4: [0.5, 0.5, -0.5, -0.5],
            5: [0.5, 0.5, 0, -0.5, -0.5],
            6: [0.4, 0.3, 0.3, -0.3, -0.3, -0.4],
            7: [0.4, 0.3, 0.3, 0, -0.3, -0.3, -0.4],
            8: [0.25, 0.25, 0.25, 0.25, -0.25, -0.25, -0.25, -0.25],
            9: [0.25, 0.25, 0.25, 0.25, 0, -0.25, -0.25, -0.25, -0.25],
            10: [0.2, 0.2, 0.2, 0.2, 0.2, -0.2, -0.2, -0.2, -0.2, -0.2]
        },
        'gradual': {
            2: [0.6, -0.6],
            3: [0.6, 0, -0.6],
            4: [0.6, 0.4, -0.4, -0.6],
            5: [0.6, 0.4, 0, -0.4, -0.6],
            6: [0.6, 0.2, 0.2, -0.2, -0.2, -0.6],
            7: [0.6, 0.2, 0.2, 0, -0.2, -0.2, -0.6],
            8: [0.5, 0.3, 0.2, 0, 0, -0.2, -0.3, -0.5],
            9: [0.4, 0.3, 0.2, 0.1, 0, -0.1, -0.2, -0.3, -0.4],
            10: [0.4, 0.3, 0.2, 0.1, 0, -0, -0.1, -0.2, -0.3, -0.4]
        },
        'top_heavy': {
            2: [0.8, -0.8],
            3: [0.8, 0, -0.8],
            4: [0.8, 0.2, -0.2, -0.8],
            5: [0.7, 0.3, 0, -0.3, -0.7],
            6: [0.7, 0.3, 0, 0, -0.3, -0.7],
            7: [0.7, 0.3, 0, 0, 0, -0.3, -0.7],
            8: [0.7, 0.3, 0.0, 0, 0, 0, -0.3, -0.7],
            9: [0.7, 0.3, 0, 0, 0, 0, 0, -0.3, -0.7],
            10: [0.6, 0.4, 0, 0, 0, 0, 0, 0, -0.4, -0.6]
        }
    }
    if ratio_type not in ratios:
        raise ValueError("Invalid ratio_type. Choose from 'default', 'equal', 'gradual', or 'top_heavy'.")
    return ratios[ratio_type][n]
# Load the price data once.
# NOTE(review): load_price_data is not defined in this chunk -- presumably a
# project helper that returns a DataFrame of prices with one column per
# ticker (it is indexed/merged on 'Date' later); confirm its contract.
filepath = r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\Codes\Historic Prices.xlsx"
prices = load_price_data(filepath)
# Load the risk free rate data; the first column is parsed as dates.
risk_free_filepath = r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\Codes\Risk Free Rate.xlsx"
risk_free_df = pd.read_excel(risk_free_filepath, sheet_name="Risk Free Rate", parse_dates=[0])
# Force a fixed schema: Date plus the 1-month and 3-month rates.
risk_free_df.columns = ['Date', '1m', '3m']
# Define the asset tickers. The base basket is always included; tickers from
# additional_tickers are mixed in by generate_valid_combinations below.
base_tickers = ['SPY US Equity', 'TLT US Equity', 'GLD US Equity', 'SHV US Equity']
additional_tickers = ['EEM US Equity', 'QQQ US Equity', 'LQD US Equity', 'HYG US Equity', 'VNQ US Equity', 'DBC US Equity']
# Define simulation parameters.
initial_aum = 100e6  # starting assets under management
start_date = pd.to_datetime('2008-01-01')
end_date = pd.to_datetime('2025-02-01')
# Parameter grid for the sweep.
metric_types = ['simple']
lookback_periods = [1, 3, 6, 9, 12]  # presumably months -- confirm with simulate_strategy
rebalance_ratios = [0.05, 0.1, 0.2]
rebalance_periods = [1, 2, 3]  # rebalance horizon; mapped to annualization factors 12/6/4 below
ratio_types = ['default', 'equal', 'gradual', "top_heavy"]
# Store results.
results = []
# Generate all possible combinations of parameters (Cartesian product).
combinations_params = list(product(
    metric_types, lookback_periods, rebalance_ratios, rebalance_periods, ratio_types
))
def calculate_correlations(prices, tickers):
    """Return the pairwise correlation matrix of simple returns for *tickers*.

    Rows containing NaN (e.g. the first pct_change row) are dropped before
    the correlation is computed.
    """
    selected = prices[tickers]
    simple_returns = selected.pct_change()
    return simple_returns.dropna().corr()
def generate_valid_combinations(base, additional, prices, max_additional=6):
    """Build the list of candidate ticker baskets.

    The base basket is always kept. Every subset of *additional* (up to
    *max_additional* tickers) is appended to it; baskets of at most 5 assets
    and full 9/10-asset baskets are accepted unconditionally, while 6-8
    asset baskets are kept only when no added ticker has a return
    correlation above 0.65 with any base ticker.
    """
    corr = calculate_correlations(prices, base + additional)
    accepted = [base]  # the base combination is always valid
    for subset_size in range(1, max_additional + 1):
        for extras in combinations(additional, subset_size):
            basket = base + list(extras)
            size = len(basket)
            if size <= 5 or size in (9, 10):
                # Small baskets and the full 9/10-asset baskets skip the
                # correlation screen entirely.
                accepted.append(basket)
            elif 6 <= size <= 8:
                clashes = any(
                    corr.loc[extra, anchor] > 0.65
                    for extra in extras
                    for anchor in base
                )
                if not clashes:
                    accepted.append(basket)
    return accepted
# Generate valid ticker combinations once, up front; the parameter sweep
# below reuses this fixed list of baskets.
ticker_combinations = generate_valid_combinations(base_tickers, additional_tickers, prices)
# Define a function to calculate Sharpe ratio.
def calculate_sharpe_ratio(returns, period):
    """Annualized Sharpe ratio of a series of per-period excess returns.

    *period* selects the annualization factor (1 -> 12, 2 -> 6, 3 -> 4);
    any other period yields NaN. A zero or undefined standard deviation
    also yields NaN.
    """
    periods_per_year = {1: 12, 2: 6, 3: 4}.get(period, np.nan)
    sigma = returns.std()
    mu = returns.mean()
    # `not sigma > 0` (rather than `sigma <= 0`) so a NaN sigma also bails out,
    # matching the original conditional-expression semantics.
    if not sigma > 0:
        return np.nan
    return (mu / sigma) * (periods_per_year ** 0.5)
# Iterate over valid ticker combinations and parameter grid.
for ticker_subset in ticker_combinations:
    for (metric_type, lookback_period, rebalance_ratio, rebalance_period, ratio_type) in combinations_params:
        try:
            # Generate the internal rebalance ratios based on number of tickers and ratio type.
            internal_rebalance = generate_internal_rebalance_ratios(len(ticker_subset), ratio_type)
            # Run the simulation with the selected tickers.
            # NOTE(review): simulate_strategy is not defined in this chunk --
            # it must return a DataFrame with at least 'Return' and
            # 'Portfolio Value' columns (both are used below); confirm.
            result_df_intermediate = simulate_strategy(
                prices, ticker_subset, [], [], initial_aum, start_date, end_date,
                rebalance_period, rebalance_ratio, lookback_period, metric_type, internal_rebalance
            )
            # Ensure that there is a 'Date' column (if not, reset the index).
            if 'Date' not in result_df_intermediate.columns:
                result_df_intermediate = result_df_intermediate.reset_index().rename(columns={'index': 'Date'})
            # Merge risk free rate data on the 'Date' column; the left join
            # keeps every simulated period even when a rate is missing.
            merged_df = pd.merge(result_df_intermediate, risk_free_df, on='Date', how='left')
            # Scale the 1-month rate to the rebalance horizon:
            # period 1 -> /12, 2 -> /6, 3 -> /4; anything else gets 0.
            if rebalance_period == 1:
                merged_df['rf_adjust'] = merged_df['1m'] / 12
            elif rebalance_period == 2:
                merged_df['rf_adjust'] = merged_df['1m'] / 6
            elif rebalance_period == 3:
                merged_df['rf_adjust'] = merged_df['1m'] / 4
            else:
                merged_df['rf_adjust'] = 0
            # Compute excess returns by subtracting the risk free rate from monthly returns.
            # The /100 converts the rate from percent to a decimal fraction.
            merged_df['Excess Return'] = merged_df['Return'] - (merged_df['rf_adjust']/100)
            # Calculate performance metrics using excess returns.
            final_aum = merged_df['Portfolio Value'].iloc[-1]
            cumulative_return = (final_aum / initial_aum) - 1
            # Whole months between start and end, used to annualize the total return.
            num_months = (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)
            annualized_return = (1 + cumulative_return) ** (12 / num_months) - 1
            annualization_factor = {1: 12, 2: 6, 3: 4}.get(rebalance_period, 12)  # Default to 12 if not found
            excess_returns = merged_df['Excess Return'].dropna()
            avg_monthly_excess_return = excess_returns.mean()
            monthly_excess_volatility = excess_returns.std()
            sharpe_ratio = calculate_sharpe_ratio(excess_returns, rebalance_period)
            annualized_volatility = monthly_excess_volatility * (annualization_factor ** 0.5)
            annualised_risk_premium = avg_monthly_excess_return*annualization_factor
            # Re-index by date so the per-year grouping below can read .index.year.
            result_df_intermediate = result_df_intermediate.set_index("Date")
            returns = result_df_intermediate['Return']
            def compute_max_drawdown(returns):
                """Compute the maximum drawdown from a series of returns."""
                # Cumulative wealth path, its running peak, and the relative
                # drop from that peak; min() is the worst (most negative) drop.
                cumulative = (1 + returns).cumprod()
                running_max = cumulative.cummax()
                drawdown = (cumulative - running_max) / running_max
                return drawdown.min()
            max_drawdown = compute_max_drawdown(returns)
            # Calculate minimum and maximum annual returns, compounded per calendar year.
            annual_returns = returns.groupby(returns.index.year).apply(lambda x: (1 + x).prod() - 1)
            min_annual_return = annual_returns.min()
            max_annual_return = annual_returns.max()
            # Store the results.
            results.append({
                'Ticker Subset': tuple(ticker_subset),
                'Num Assets': len(ticker_subset),
                'Internal Rebalance Type': ratio_type,
                'Internal Rebalance': internal_rebalance,
                'Rebalance Ratio': rebalance_ratio,
                'Rebalance Period': rebalance_period,
                'Lookback Period': lookback_period,
                'Annualized Return': annualized_return,
                'Annualised risk Premium': annualised_risk_premium,
                'Annualised excess Volatility': annualized_volatility,
                'Sharpe Ratio': sharpe_ratio,
                'Minimum Annual Return' : min_annual_return,
                'Maximum Annual Return' : max_annual_return,
                'Maximum Drawdown' : max_drawdown
            })
        except ValueError as e:
            # Expected for combinations the ratio generator rejects; skip and continue.
            print(f"Skipping combination due to error: {e}")
            continue
# Create the results DataFrame.
summary_df = pd.DataFrame(results)
# Sort by Sharpe Ratio (descending) so the best configurations come first.
summary_df = summary_df.sort_values(by='Sharpe Ratio', ascending=False).reset_index(drop=True)
# Display the final results with readable float formatting.
pd.set_option('display.float_format', lambda x: f'{x:,.2f}')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
print(summary_df)
# Persist the sweep, then reload it (the Excel round-trip also adds an
# unnamed index column and stringifies the ratio lists on read-back).
summary_df.to_excel(r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\summary_df_startin_2007_final_v3.xlsx")
summary_df = pd.read_excel(r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\summary_df_startin_2007_final_v3.xlsx")
summary_df = summary_df.copy()
# Flag rows with the best Sharpe Ratio within each asset-count group.
summary_df['Best_Sharpe'] = summary_df.groupby(['Num Assets'])['Sharpe Ratio'].transform('max') == summary_df['Sharpe Ratio']
# Flag rows with the best (least negative) Maximum Drawdown in each group.
# NOTE(review): the original comment said "best Annualized Return" but the
# code compares 'Maximum Drawdown' -- confirm which metric is intended
# before relying on the 'Best_Annualized' name.
summary_df['Best_Annualized'] = summary_df.groupby(['Num Assets'])['Maximum Drawdown'].transform('max') == summary_df['Maximum Drawdown']
# A row is flagged Optimal only when it is best on both criteria.
summary_df['Optimal'] = summary_df['Best_Sharpe'] & summary_df['Best_Annualized']
summary_df.to_excel(r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\summary_df_startin_2007_final_v3.xlsx")
# (Paste-site footer removed.)