import numpy as np
import emcee
import matplotlib.pyplot as plt
from scipy.stats import uniform

# Provided data
T_0_data = np.array([0, 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024])  # T_0 values in uM
R_data = np.array([1, 0.98335, 0.9736, 0.96248, 0.89727, 0.83534, 0.69785,
                   0.52721, 0.34313, 0.19604, 0.09583, 0.02793, 0])  # R values
sigma_R = np.array([0.01203, 0.01603, 0.01744, 0.01448, 0.0188, 0.00997, 0.01073,
                    0.00778, 0.0047, 0.00599, 0.00352, 0.00342, 0.00457])  # Errors in R

# Given constants and their errors
L_0 = 7.0                   # Constant value of L_0 in uM
sigma_T_0 = 0.082           # Error range in T_0
absolute_L_0_error = 0.161  # Absolute error in L_0

# Define the model
def model(K_d, T_0, L_0):
    term1 = (K_d + T_0 - L_0) / (2 * L_0)
    term2 = np.sqrt(term1**2 + K_d / L_0)
    return -term1 + term2

# Likelihood incorporating uncertainty in T_0 and L_0: for each data point,
# average the model over a uniform L_0 grid, then marginalize the Gaussian
# likelihood over a uniform T_0 grid. (The original draft summed density-
# weighted *log*-likelihoods and tested the sum against zero, which is always
# non-positive and therefore always returned -inf; the marginalization here
# is done in likelihood space instead.)
def log_likelihood(theta, R, T_0, L_0, sigma_R, sigma_T_0, absolute_L_0_error):
    K_d = theta[0]
    total_log_likelihood = 0.0
    resolution = 20  # Reduced grid resolution for speed

    # The L_0 grid and its uniform density are the same for every data point
    L_0_min = L_0 - absolute_L_0_error
    L_0_max = L_0 + absolute_L_0_error
    L_0_values = np.linspace(L_0_min, L_0_max, resolution)
    prob_density_L_0 = uniform.pdf(L_0_values, loc=L_0_min, scale=2 * absolute_L_0_error)

    for i in range(len(R)):
        T_0_min = T_0[i] - sigma_T_0
        T_0_max = T_0[i] + sigma_T_0
        T_0_values = np.linspace(T_0_min, T_0_max, resolution)
        likelihoods = np.zeros_like(T_0_values)

        for j, T_0_val in enumerate(T_0_values):
            model_R_values = model(K_d, T_0_val, L_0_values)
            if np.any(np.isnan(model_R_values)):
                continue
            # Model prediction averaged over the uniform L_0 distribution
            weighted_model_R = np.sum(prob_density_L_0 * model_R_values) / np.sum(prob_density_L_0)
            # Gaussian likelihood of the data point, weighted by the T_0 density
            prob_density_T_0 = uniform.pdf(T_0_val, loc=T_0_min, scale=2 * sigma_T_0)
            likelihoods[j] = prob_density_T_0 * np.exp(-0.5 * ((R[i] - weighted_model_R) / sigma_R[i])**2)

        marginal = np.sum(likelihoods)
        if marginal <= 0:
            return -np.inf
        total_log_likelihood += np.log(marginal)

    return total_log_likelihood

# Define the priors
def log_prior(theta):
    K_d = theta[0]
    if 0 < K_d < 100:  # Example bounds for K_d
        return 0.0
    return -np.inf

# Define the posterior probability
def log_probability(theta, R, T_0, L_0, sigma_R, sigma_T_0, absolute_L_0_error):
    lp = log_prior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + log_likelihood(theta, R, T_0, L_0, sigma_R, sigma_T_0, absolute_L_0_error)

# Set up the MCMC sampler
ndim = 1       # Number of parameters (K_d)
nwalkers = 50  # Number of MCMC walkers
nsteps = 500   # Reduced number of MCMC steps

# Initial positions of the walkers: a small ball around K_d = 0.5
initial_pos = 0.5 + 1e-4 * np.random.randn(nwalkers, ndim)

# Run the MCMC sampler
sampler = emcee.EnsembleSampler(
    nwalkers, ndim, log_probability,
    args=(R_data, T_0_data, L_0, sigma_R, sigma_T_0, absolute_L_0_error),
)
sampler.run_mcmc(initial_pos, nsteps, progress=True)

# Analyze the results
samples = sampler.get_chain(discard=100, thin=10, flat=True)
K_d_samples = samples[:, 0]

# Calculate the median and 68% credible interval
K_d_median = np.median(K_d_samples)
K_d_lower = np.percentile(K_d_samples, 16)
K_d_upper = np.percentile(K_d_samples, 84)
print(f"K_d: {K_d_median} (+{K_d_upper - K_d_median}, -{K_d_median - K_d_lower})")

# Plot the results
plt.figure()
plt.hist(K_d_samples, bins=50, alpha=0.75)
plt.axvline(K_d_median, color='r', linestyle='--')
plt.axvline(K_d_lower, color='r', linestyle=':')
plt.axvline(K_d_upper, color='r', linestyle=':')
plt.xlabel('K_d')
plt.ylabel('Frequency')
plt.title('Posterior Distribution of K_d with Uncertainty in T_0 and L_0')
plt.show()
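
# Optional convergence check: a minimal sketch, assuming emcee 3.x and the
# `sampler` object created above. The integrated autocorrelation time gives a
# rough number of steps between independent samples, which helps judge whether
# nsteps = 500 and discard = 100 are adequate. quiet=True makes emcee return
# an estimate (with a warning) even when the chain is shorter than the ~50x
# the autocorrelation time it would normally require.
tau = sampler.get_autocorr_time(quiet=True)
print(f"Integrated autocorrelation time: {tau[0]:.1f} steps")
print(f"Rough effective sample size: {nwalkers * nsteps / tau[0]:.0f}")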