# -------------------------------
# 4. Performance Metrics Calculation (Daily Adjusted with Month-End Annual Returns)
# -------------------------------
import numpy as np
import pandas as pd

# Load the risk-free rate data (result_df is assumed to carry over from the
# preceding portfolio-construction section)
risk_free_filepath = r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\Codes\Risk Free Rate.xlsx"
risk_free_df = pd.read_excel(risk_free_filepath, sheet_name="Risk Free Rate", parse_dates=[0])
risk_free_df.columns = ['Date', '1m', '3m']
# Ensure that there is a 'Date' column in result_df
if 'Date' not in result_df.columns:
    result_df = result_df.reset_index().rename(columns={'index': 'Date'})
# Merge risk free rate data on the 'Date' column and sort by date
merged_df = pd.merge(result_df, risk_free_df, on='Date', how='left')
merged_df = merged_df.sort_values('Date').reset_index(drop=True)
# Convert the risk free rate to a daily rate.
# Assuming the '1m' column is an annualized percentage rate, e.g. 3.0 for 3%.
merged_df['rf_daily'] = merged_df['1m'] / 100 / 252
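# A compounded alternative (sketch, assuming the '1m' column is an annualized
# simple rate; nearly identical for small rates):
# merged_df['rf_daily'] = (1 + merged_df['1m'] / 100) ** (1 / 252) - 1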
# Compute daily excess returns by subtracting the daily risk free rate from the daily return
merged_df['Excess Return'] = merged_df['Return'] - merged_df['rf_daily']
# Update the Sharpe ratio calculation for daily returns using 252 trading days
def calculate_sharpe_ratio(returns, trading_days=252):
    volatility = returns.std()
    avg_return = returns.mean()
    return (avg_return / volatility) * np.sqrt(trading_days) if volatility > 0 else np.nan
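# Quick sanity check of the helper on synthetic data (illustrative sketch only):
# _rng = np.random.default_rng(0)
# calculate_sharpe_ratio(pd.Series(_rng.normal(0.0005, 0.01, 252)))
# # expected around (0.0005 / 0.01) * np.sqrt(252) ≈ 0.8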
excess_returns = merged_df['Excess Return'].dropna()
sharpe_ratio = calculate_sharpe_ratio(excess_returns)
# Calculate daily metrics
avg_daily_excess_return = excess_returns.mean()
daily_excess_volatility = excess_returns.std()
annualized_volatility = daily_excess_volatility * np.sqrt(252)
annualized_risk_premium = avg_daily_excess_return * 252
# Compute cumulative return based on Total AUM evolution
initial_aum = result_df['Total AUM'].iloc[0]
final_aum = result_df['Total AUM'].iloc[-1]
cumulative_return = (final_aum / initial_aum) - 1
# Annualized return using the calendar days between first and last observation
num_days = (result_df['Date'].iloc[-1] - result_df['Date'].iloc[0]).days
annualized_return = (final_aum / initial_aum) ** (365 / num_days) - 1
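# Cross-check (sketch, assuming 'Return' holds simple daily returns and the AUM
# series has no external flows): compounding the daily returns should land close
# to the AUM-based cumulative figure:
# implied_cumulative = (1 + result_df['Return'].dropna()).prod() - 1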
# Maximum drawdown calculation using daily returns
def compute_max_drawdown(returns):
    returns = returns.dropna()  # a leading NaN (e.g. from pct_change) would otherwise poison cumprod
    cumulative = (1 + returns).cumprod()
    running_max = cumulative.cummax()
    drawdown = (cumulative - running_max) / running_max
    return drawdown.min()
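# Illustrative check (sketch): a series that gains 10% then loses 20% should
# report a 20% maximum drawdown:
# compute_max_drawdown(pd.Series([0.10, -0.20]))  # -> -0.20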
max_drawdown = compute_max_drawdown(result_df['Return'])
# Calculate annual returns based on last day-of-month Total AUM values
# Ensure Date is set as index for resampling
result_df = result_df.set_index('Date')
# Resample Total AUM values to month-end (the 'M' alias is deprecated in favor
# of 'ME' on pandas >= 2.2)
monthly_values = result_df['Total AUM'].resample('M').last()
# For each year, compute annual return as (last month-end value / first month-end value) - 1
annual_returns = monthly_values.groupby(monthly_values.index.year).apply(lambda x: x.iloc[-1] / x.iloc[0] - 1)
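# Caveat: the ratio above runs from the first month-end of each year to the last,
# so each year's January return is excluded. A calendar-year alternative (sketch)
# differences December month-ends instead ('YE' on pandas >= 2.2):
# annual_returns = monthly_values.resample('A').last().pct_change().dropna()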
min_annual_return = annual_returns.min()
max_annual_return = annual_returns.max()
# Create a DataFrame with all metrics
metrics_df = pd.DataFrame({
    'Metric': [
        'Cumulative Return',
        'Annualized Return',
        'Average Daily Excess Return',
        'Daily Excess Volatility',
        'Annualized Volatility',
        'Sharpe Ratio',
        'Annualized Risk Premium',
        'Maximum Drawdown',
        'Minimum Annual Return',
        'Maximum Annual Return'
    ],
    'Value': [
        cumulative_return,
        annualized_return,
        avg_daily_excess_return,
        daily_excess_volatility,
        annualized_volatility,
        sharpe_ratio,
        annualized_risk_premium,
        max_drawdown,
        min_annual_return,
        max_annual_return
    ]
})
# Format the values as percentages where appropriate
percentage_metrics = [
    'Cumulative Return', 'Annualized Return', 'Average Daily Excess Return',
    'Daily Excess Volatility', 'Annualized Volatility', 'Maximum Drawdown',
    'Minimum Annual Return', 'Maximum Annual Return', 'Annualized Risk Premium'
]
for metric in percentage_metrics:
    metrics_df.loc[metrics_df['Metric'] == metric, 'Value'] = metrics_df.loc[
        metrics_df['Metric'] == metric, 'Value'
    ].apply(lambda x: f"{x:.2%}")
# Format Sharpe Ratio to 2 decimal places
metrics_df.loc[metrics_df['Metric'] == 'Sharpe Ratio', 'Value'] = metrics_df.loc[
    metrics_df['Metric'] == 'Sharpe Ratio', 'Value'
].apply(lambda x: f"{x:.2f}")
print("\nPerformance Metrics for the Portfolio:")
print(metrics_df.to_string(index=False))
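# Optional (sketch): persist the metrics table next to the other outputs.
# metrics_df.to_excel("portfolio_performance_metrics.xlsx", index=False)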
-----------------------------------------------------------------------
import pandas as pd
import numpy as np
from statsmodels.tools.tools import add_constant
from statsmodels.discrete.discrete_model import Logit
# === STEP 1: LOAD MACRO DATA === #
def load_macro_data(filepath):
    """
    Load macro indicators from Excel.
    - VIX, VIX3M from 'Eq' sheet
    - Remove weekends
    """
    vix_data = pd.read_excel(filepath, sheet_name='Eq', index_col=0, parse_dates=True, usecols=[0, 4, 5])
    vix_data.columns = ['VIX', 'VIX3M']
    vix_data = vix_data[vix_data.index.dayofweek < 5]
    return vix_data
# === STEP 2: SLOPE + LOGIT MODEL === #
def calculate_slopes(series, window=5):
    return series.rolling(window).apply(lambda x: np.polyfit(np.arange(len(x)), x, 1)[0], raw=True)
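# np.polyfit(x, y, 1)[0] is the slope of a least-squares line through the window,
# i.e. the average per-day change of the series. Illustrative sketch:
# calculate_slopes(pd.Series([1.0, 2.0, 3.0, 4.0, 5.0]), window=5).iloc[-1]  # -> 1.0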
def split_slope(slope_series):
    # Positive part (clipped below at 0) and negative part (clipped above at 0)
    return slope_series.clip(lower=0), slope_series.clip(upper=0)
def logistic_regression(X, y):
    X = add_constant(X)
    model = Logit(y, X).fit(disp=False)
    return model
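# If a training window is small or the classes separate perfectly, Logit.fit can
# fail to converge; an L1-regularized fit is a possible fallback (sketch):
# model = Logit(y, X).fit_regularized(method='l1', alpha=1.0, disp=False)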
# === STEP 3: BACKTEST === #
def run_vix_slope_signal_backtest(prices, macro_data, start_date='2008-01-01', slope_window=5,
                                  lookback_window=252, forecast_horizon=5, prob_threshold=0.6):
    macro_data = macro_data.copy()
    macro_data['VIX_Spread'] = macro_data['VIX'] - macro_data['VIX3M']
    macro_data['Slope'] = calculate_slopes(macro_data['VIX_Spread'], window=slope_window)
    macro_data['Slope+'], macro_data['Slope−'] = split_slope(macro_data['Slope'])
    prices = prices.copy()
    # Forward-looking target: SPX return over the next `forecast_horizon` days
    prices['SPX_Return'] = prices['SPY US Equity'].pct_change(forecast_horizon).shift(-forecast_horizon)
    # Merge daily (no resampling)
    merged = macro_data[['Slope+', 'Slope−']].join(prices[['SPX_Return']], how='inner').dropna()
    merged = merged[merged.index >= start_date]
    results = []
    full_data = macro_data[['Slope+', 'Slope−']].join(prices[['SPX_Return']], how='inner').dropna()
    for i in range(len(merged)):
        current_date = merged.index[i]
        full_data_current_idx = full_data.index.get_loc(current_date)
        if full_data_current_idx < lookback_window:
            continue
        train = full_data.iloc[full_data_current_idx - lookback_window:full_data_current_idx]
        X_train = train[['Slope+', 'Slope−']]
        y_train = (train['SPX_Return'] < 0).astype(int)
        try:
            model = logistic_regression(X_train, y_train)
            # Build the test row with the same columns and constant as the training data
            X_test_data = merged[['Slope+', 'Slope−']].iloc[i:i+1]
            X_test = add_constant(X_test_data, has_constant='add')
            # np.asarray guarantees positional access whether predict returns a
            # Series (pandas exog) or an ndarray
            prob = float(np.asarray(model.predict(X_test))[0])
            actual_return = merged['SPX_Return'].iloc[i]
            signal = 'risk_off' if prob > prob_threshold else 'risk_on'
            results.append({
                'Date': current_date,
                'Predicted_Prob_Negative': prob,
                'Actual_Return': actual_return,
                'Signal': signal,
                'Was_Negative': actual_return < 0,
                'Correct_Prediction': (actual_return < 0 and signal == 'risk_off'),
                'Slope+': merged['Slope+'].iloc[i],
                'Slope−': merged['Slope−'].iloc[i]
            })
        except Exception as e:
            print(f"Error at date {current_date}: {e}")
            continue
    df_results = pd.DataFrame(results).set_index('Date')
    return df_results
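# Sketch of turning the signal into a naive overlay (assumption, not part of the
# original pipeline: hold SPX on risk_on days, go flat on risk_off days; the
# 5-day forward returns overlap, so this is indicative only):
# overlay_returns = df_results['Actual_Return'].where(df_results['Signal'] == 'risk_on', 0.0)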
# === STEP 4: EVALUATION & EXPORT === #
def evaluate_and_export_results(df_results, lookback_window, prob_threshold):
    total_neg = df_results['Was_Negative'].sum()
    correct = df_results['Correct_Prediction'].sum()
    false_alarms = ((df_results['Signal'] == 'risk_off') & (~df_results['Was_Negative'])).sum()
    precision = correct / (correct + false_alarms + 1e-8)
    print("\n📊 Accuracy Report (Threshold = {:.2f}):".format(prob_threshold))
    print(f"Negative Return Days: {total_neg}")
    print(f"Correct Risk-Off Calls: {correct}")
    print(f"False Alarms: {false_alarms}")
    print(f"Precision on Negatives: {precision:.4f}")
    filename = f"logit_vix_signals_L{lookback_window}_T{int(prob_threshold*100)}.xlsx"
    df_results.to_excel(filename)
    print(f"\n📁 Exported to: {filename}")
# === STEP 5: EXECUTE === #
price_filepath = r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\Codes\Historic Prices.xlsx"
macro_filepath = r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\Momentum Strategy Overlay Data.xlsx"
prices = pd.read_excel(price_filepath, sheet_name='Sheet1', index_col=0, parse_dates=True)
macro_data = load_macro_data(macro_filepath)
lookback_window = 504
start_date = '2008-01-01'
prob_threshold = 0.6 # Increase to reduce false alarms
df_results = run_vix_slope_signal_backtest(prices, macro_data,
                                           start_date=start_date,
                                           lookback_window=lookback_window,
                                           prob_threshold=prob_threshold)
evaluate_and_export_results(df_results, lookback_window, prob_threshold)