import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# PARAMETERS
LOOKBACK_WINDOWS = [60, 120, 240]  # approximately 3, 6 and 12 months of daily data
TOP_N = 5  # number of risk-on assets to select at each rebalance

# Load the historic prices data
# Assumes the CSV has dates in the first column (which we'll parse as index)
prices = pd.read_csv('historic_prices.csv', index_col=0, parse_dates=True)
prices.sort_index(inplace=True)

# Calculate daily returns for later use (for risk-adjusted momentum denominator)
daily_log_returns = np.log(prices / prices.shift(1))

# Create dictionaries to hold each momentum measure for each window
tr_mom = {}      # Total Return Momentum
pma_mom = {}     # Price minus Moving Average
ram_mom = {}     # Risk Adjusted Momentum

for window in LOOKBACK_WINDOWS:
    # Total return momentum: (P_t - P_{t-window}) / P_{t-window}
    tr_mom[window] = (prices - prices.shift(window)) / prices.shift(window)
    
    # Price minus Moving Average: (P_t - SMA_n) / SMA_n, i.e. (P_t / SMA_n) - 1
    sma = prices.rolling(window=window).mean()
    pma_mom[window] = (prices / sma) - 1
    
    # Risk-adjusted momentum:
    # Numerator: ln(P_t / P_{t-window})
    num = np.log(prices / prices.shift(window))
    # Denom: Rolling sum of absolute daily log returns over the window
    den = daily_log_returns.abs().rolling(window=window).sum()
    ram_mom[window] = num / den

# We now have 3 sets of signals each with 3 windows (9 signals total)
# For each rebalancing date, we will compute the composite score for each asset by summing its ranks

# Determine monthly rebalancing dates: use the last trading day of each month that is
# actually present in the price index, so that .loc lookups on the signal DataFrames
# never hit a missing (e.g. weekend) calendar month-end.
rebalance_dates = prices.groupby(prices.index.to_period('M')).tail(1).index

# Prepare containers for storing portfolio weights and performance
weights_list = []  # each element is a DataFrame of asset weights at a given rebalance date
portfolio_returns = pd.Series(dtype=float)
aum = []  # cumulative AUM at each rebalance period

# Starting AUM
initial_aum = 100.0
current_aum = initial_aum

# For each rebalance date, compute composite momentum scores from available signals
for i, reb_date in enumerate(rebalance_dates):
    # Skip dates that fall before the longest lookback window has enough history,
    # and skip the final date, which has no forward holding period.
    if reb_date < prices.index[max(LOOKBACK_WINDOWS)] or reb_date >= prices.index[-1]:
        continue

    # Read each asset's signal value as of the rebalance date
    # (signals are computed with data up to and including that day).
    composite_score = pd.Series(0, index=prices.columns, dtype=float)
    positive_signals_count = pd.Series(0, index=prices.columns, dtype=int)
    
    # For each of the 9 signals, compute the cross-sectional rank (higher is better).
    # Here we use ranking on the value (using the 'average' method).
    for window in LOOKBACK_WINDOWS:
        for signal_dict in [tr_mom, pma_mom, ram_mom]:
            # Get the signal values on the rebalance date; drop NaNs
            signal_vals = signal_dict[window].loc[reb_date]
            # Drop assets with NaN (due to insufficient history)
            valid_signal = signal_vals.dropna()
            if valid_signal.empty:
                continue
            # Rank cross-sectionally: the highest signal value receives the highest
            # rank, so higher momentum contributes a higher score.
            ranks = valid_signal.rank(method='average', ascending=True)
            # Raw ranks are simply summed here; an optional zero-mean rank scaling
            # is sketched in the helper defined after this loop.
            composite_score[valid_signal.index] += ranks
            # Count how many signals are positive
            positive_signals_count[valid_signal.index] += (valid_signal > 0).astype(int)
    
    # Determine an absolute momentum filter: e.g. require at least 5 out of 9 signals to be positive.
    abs_threshold = 5  
    qualified = positive_signals_count >= abs_threshold
    
    # Keep only qualified assets
    composite_score = composite_score[qualified]
    
    # If no asset qualifies, go risk-off (empty weights, i.e. 100% risk-off asset)
    if composite_score.empty:
        selected_assets = []
    else:
        # Select the top TOP_N assets based on composite score (highest sum of ranks)
        selected_assets = composite_score.sort_values(ascending=False).head(TOP_N).index.tolist()
    
    # Define weights: if no asset qualifies, allocate everything to the risk-off asset
    # (assumed here to be a ticker such as 'SHV'); adjust this to match your risk-off universe.
    if len(selected_assets) == 0:
        allocation = pd.Series(0.0, index=prices.columns)
        if 'SHV' in prices.columns:
            allocation['SHV'] = 1.0
        else:
            # No dedicated risk-off asset in the universe: book the period in a
            # synthetic zero-return 'CASH' bucket (it has no price series, so the
            # portfolio earns 0% until the next rebalance).
            allocation['CASH'] = 1.0
    else:
        # Equal weighting for selected assets
        allocation = pd.Series(0, index=prices.columns, dtype=float)
        allocation[selected_assets] = 1.0 / len(selected_assets)
    
    allocation.name = reb_date
    weights_list.append(allocation)
    
    # Determine the next rebalancing date (or use the end of the series)
    if i < len(rebalance_dates) - 1:
        next_reb_date = rebalance_dates[i+1]
    else:
        next_reb_date = prices.index[-1]
    
    # Calculate returns from current rebalance date to next rebalancing date (using daily returns)
    period_prices = prices.loc[reb_date:next_reb_date]
    
    # Compute daily portfolio returns: weighted sum of asset returns.
    # First, compute daily returns for the period
    period_daily_returns = period_prices.pct_change().fillna(0)
    # Multiply each column by its weight (broadcasting)
    portfolio_daily_returns = (period_daily_returns * allocation).sum(axis=1)
    
    # Compound returns for the period (excluding the first day which is rebalance day)
    period_return = (1 + portfolio_daily_returns).cumprod().iloc[-1] - 1
    
    # Update AUM
    current_aum = current_aum * (1 + period_return)
    aum.append(current_aum)
    
    # Save the period return (using the next rebalancing date as the label)
    portfolio_returns[next_reb_date] = period_return
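
# (Optional) The zero-mean rank scaling mentioned inside the signal loop above is
# not used in this backtest. The helper below is only a minimal sketch of that
# variant, should you prefer each signal to contribute a zero-mean score.
def demeaned_ranks(signal_vals):
    """Cross-sectional ranks centred on zero (higher raw value -> higher score)."""
    ranks = signal_vals.dropna().rank(method='average', ascending=True)
    return ranks - ranks.mean()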

# Combine weights into a DataFrame for inspection
weights_df = pd.DataFrame(weights_list)
weights_df.index.name = 'Rebalance_Date'

# Create a DataFrame for portfolio performance summary
performance_df = pd.DataFrame({
    'Period_Return': portfolio_returns,
    'AUM': pd.Series(aum, index=portfolio_returns.index)
})

# Print outputs
print("=== Portfolio Weights at Rebalance Dates ===")
print(weights_df)
print("\n=== Portfolio Period Returns ===")
print(portfolio_returns)
print("\n=== Portfolio AUM Over Time ===")
print(performance_df)

# Optionally, plot the cumulative AUM
plt.figure(figsize=(10, 5))
plt.plot(performance_df.index, performance_df['AUM'], marker='o')
plt.xlabel('Date')
plt.ylabel('Portfolio AUM')
plt.title('Cumulative Portfolio AUM')
plt.grid(True)
plt.show()



# -------------------------------
# 4. Performance Metrics Calculation
# -------------------------------
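# NOTE: the metrics below expect a DataFrame `result_df` with columns 'Date',
# 'Return' and 'Portfolio Value', plus `rebalance_period`, `start_date` and
# `end_date`, none of which are defined above. The block below is a minimal,
# assumed bridge built from the backtest output `performance_df` (monthly
# rebalancing); replace it with your own inputs if they differ.
result_df = performance_df.reset_index().rename(
    columns={'index': 'Date', 'Period_Return': 'Return', 'AUM': 'Portfolio Value'}
)
rebalance_period = 1  # assumed convention: 1 = monthly, 2 = bi-monthly, 3 = quarterly
start_date = result_df['Date'].iloc[0]
end_date = result_df['Date'].iloc[-1]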

# Load the risk free rate data
risk_free_filepath = r"\\asiapac.nom\data\MUM\IWM\India_IWM_IPAS\Reet\Momentum Strategy\Codes\Risk Free Rate.xlsx"
risk_free_df = pd.read_excel(risk_free_filepath, sheet_name="Risk Free Rate", parse_dates=[0])
risk_free_df.columns = ['Date', '1m', '3m']

def calculate_sharpe_ratio(returns, period):
    """Annualized Sharpe ratio of a series of per-period excess returns."""
    volatility = returns.std()
    avg_return = returns.mean()
    # Map the rebalance period (in months) to the number of periods per year
    annualization_factor = {1: 12, 2: 6, 3: 4}.get(period, 12)  # default to monthly (12)
    return (avg_return / volatility) * (annualization_factor ** 0.5) if volatility > 0 else np.nan

# Ensure that there is a 'Date' column (if not, reset the index)
if 'Date' not in result_df.columns:
    result_df = result_df.reset_index().rename(columns={'index': 'Date'})

# Merge risk free rate data on the 'Date' column
merged_df = pd.merge(result_df, risk_free_df, on='Date', how='left')
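
# The exact-date merge above assumes the risk-free file contains every rebalance
# date. As an assumed fallback (not part of the original logic), take the most
# recent earlier rate for any rebalance date that did not match exactly.
if merged_df['1m'].isna().any():
    merged_df = pd.merge_asof(result_df.sort_values('Date'),
                              risk_free_df.sort_values('Date'),
                              on='Date', direction='backward')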

# Adjust risk-free rate based on the rebalance period
if rebalance_period == 1:
    merged_df['rf_adjust'] = merged_df['1m'] / 12
elif rebalance_period == 2:
    merged_df['rf_adjust'] = merged_df['1m'] / 6
elif rebalance_period == 3:
    merged_df['rf_adjust'] = merged_df['1m'] / 4
else:
    merged_df['rf_adjust'] = 0

# Compute excess returns by subtracting the period-adjusted risk-free rate
# (quoted in percent, hence the division by 100) from the period returns
merged_df['Excess Return'] = merged_df['Return'] - (merged_df['rf_adjust'] / 100)

# Calculate performance metrics using excess returns
excess_returns = merged_df['Excess Return'].dropna()
sharpe_ratio = calculate_sharpe_ratio(excess_returns, rebalance_period)

annualization_factor = {1: 12, 2: 6, 3: 4}.get(rebalance_period, 12)
avg_monthly_excess_return = excess_returns.mean()
monthly_excess_volatility = excess_returns.std()
annualized_volatility = monthly_excess_volatility * (annualization_factor ** 0.5)
annualised_risk_premium = avg_monthly_excess_return * annualization_factor

# Calculate cumulative return and annualized return
initial_aum = result_df['Portfolio Value'].iloc[0]
final_aum = result_df['Portfolio Value'].iloc[-1]
cumulative_return = (final_aum / initial_aum) - 1

# Convert start_date and end_date to datetime objects if necessary
from datetime import datetime
if isinstance(start_date, str):
    start_date = datetime.strptime(start_date, '%Y-%m-%d')
if isinstance(end_date, str):
    end_date = datetime.strptime(end_date, '%Y-%m-%d')

num_months = (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)
num_months = max(num_months, 1)  # guard against a zero-month backtest window
annualized_return = ((1 + cumulative_return) ** (12 / num_months)) - 1

def compute_max_drawdown(returns):
    cumulative = (1 + returns).cumprod()
    running_max = cumulative.cummax()
    drawdown = (cumulative - running_max) / running_max
    return drawdown.min()

max_drawdown = compute_max_drawdown(result_df['Return'])

# Calculate annual returns
annual_returns = result_df.groupby(result_df['Date'].dt.year)['Return'].apply(lambda x: (1 + x).prod() - 1)
min_annual_return = annual_returns.min()
max_annual_return = annual_returns.max()

# Create a DataFrame with all metrics
metrics_df = pd.DataFrame({
    'Metric': [
        'Cumulative Return',
        'Annualized Return',
        'Average Monthly Excess Return',
        'Monthly Excess Volatility',
        'Annualized Volatility',
        'Sharpe Ratio',
        'Annualized Risk Premium',
        'Maximum Drawdown',
        'Minimum Annual Return',
        'Maximum Annual Return'
    ],
    'Value': [
        cumulative_return,
        annualized_return,
        avg_monthly_excess_return,
        monthly_excess_volatility,
        annualized_volatility,
        sharpe_ratio,
        annualised_risk_premium,
        max_drawdown,
        min_annual_return,
        max_annual_return
    ]
})

# Format the values as percentages where appropriate
percentage_metrics = ['Cumulative Return', 'Annualized Return', 'Average Monthly Excess Return', 
                      'Monthly Excess Volatility', 'Annualized Volatility', 'Maximum Drawdown', 
                      'Minimum Annual Return', 'Maximum Annual Return', 'Annualized Risk Premium']

for metric in percentage_metrics:
    metrics_df.loc[metrics_df['Metric'] == metric, 'Value'] = metrics_df.loc[metrics_df['Metric'] == metric, 'Value'].apply(lambda x: f"{x:.2%}")

# Format Sharpe Ratio to 2 decimal places
metrics_df.loc[metrics_df['Metric'] == 'Sharpe Ratio', 'Value'] = metrics_df.loc[metrics_df['Metric'] == 'Sharpe Ratio', 'Value'].apply(lambda x: f"{float(x):.2f}")

print("\nPerformance Metrics for the Portfolio:")
print(metrics_df.to_string(index=False))

# -------------------------------
# 5. Plotting the Weights Evolution
# -------------------------------

# Provided color palette for plotting (one color per asset)
colors = ['#CA2420', '#737373', '#80A9AE', '#00305C', '#80003F', '#CC8D19', 
          '#AEADB0', '#00713B', '#DCB940', '#00677A', '#9A9500', '#8F3336', 
          '#5A7400', '#B0CAD0', '#6077A3', '#995E7A', '#DCB172', '#D4D4D4', 
          '#6B9977', '#E5CC89', '#649CB3', '#B7B56B', '#BF8B79', '#8F9D66', '#000000']

# Plot evolution of asset weights over rebalancing dates from weights_df
plt.figure(figsize=(12, 6))
for i, ticker in enumerate(weights_df.columns):
    # Use modulo to cycle through colors if there are more tickers than colors provided.
    plt.plot(weights_df.index, weights_df[ticker], label=ticker, color=colors[i % len(colors)], marker='o')

plt.xlabel('Rebalance Date')
plt.ylabel('Asset Weight')
plt.title('Evolution of Asset Weights Over Time')
plt.legend(loc='upper left', bbox_to_anchor=(1,1))
plt.tight_layout()
plt.show()