Hello. I am trying to write the following code so that the solution is `lam_solution = lamb_i_star` for `t < 0`, and `lam_solution = lam_func(time, dsigma_dt_solution)` for `t >= 0`. It keeps producing a spurious jump at `t = 0` (I am trying to replicate a figure from a paper). Any help would be appreciated. I shouldn't have to manually overwrite the values with `lamb_i_star` for `t < 0`.

```
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
# Parameter values used to reproduce the figure from the paper.
gamma = 11.2
lamb_i_star = 0.46
lamb_f_star = 0.95
lamb_c = 1.17
phi_rb_i = 0.049

# Quantities derived from the parameters above.
mu_f = lamb_f_star / (1 - lamb_f_star / lamb_c)
phi_rb_i_star = phi_rb_i + lamb_i_star / gamma
phi_rb_f_star = phi_rb_i + lamb_f_star / gamma
sigma_i = lamb_i_star / phi_rb_i_star  # value of sigma at t = 0
sigma_f = lamb_f_star / phi_rb_f_star  # value sigma relaxes toward
def func(t, y):
    """Right-hand side of the sigma ODE for solve_ivp.

    dsigma/dt = mu_f * sigma * (1 - sigma/sigma_f) / (1 - sigma/gamma)

    Parameters
    ----------
    t : float
        Time (unused; the ODE is autonomous, but solve_ivp requires it).
    y : sequence of float
        State vector; y[0] is sigma.

    Returns
    -------
    list of float
        One-element list containing dsigma/dt.
    """
    # Bug fix: the original assigned `sigma = y[0]` and then ignored it,
    # re-reading y[0] directly; use the local consistently.
    sigma = y[0]
    dsigma_dt = (mu_f * sigma) * ((1 - (sigma / sigma_f)) / (1 - (sigma / gamma)))
    return [dsigma_dt]
def solve_func(func, t_eval):
    """Solve the sigma ODE on `t_eval`, holding the pre-shift value for t < 0.

    The initial condition sigma(0) = lamb_i_star / phi_rb_i_star is defined
    at t = 0 (the moment of the shift).  The original code integrated from
    t_eval[0] = -2 with that initial condition, so sigma had already relaxed
    toward sigma_f by t = 0 — the source of the spurious jump in the plot.
    Here sigma is pinned to its initial value for all t < 0 and the
    integration starts at t = 0.

    Parameters
    ----------
    func : callable
        Right-hand side f(t, y) for solve_ivp.
    t_eval : array_like of float
        Sorted evaluation times; may include negative values.

    Returns
    -------
    numpy.ndarray
        sigma evaluated at each point of `t_eval`.
    """
    t_eval = np.asarray(t_eval, dtype=float)
    y0 = [lamb_i_star / phi_rb_i_star]
    # Pre-shift (t < 0): steady state, sigma stays at its initial value.
    sigma = np.full(t_eval.shape, y0[0])
    after = t_eval >= 0
    if after.any() and t_eval[after][-1] > 0:
        ts = t_eval[after]
        solution = solve_ivp(func, [0.0, ts[-1]], y0, method='LSODA', t_eval=ts)
        sigma[after] = solution.y[0]
    return sigma
def lam_func(t, dsigma_dt):
    """Closed-form growth rate lambda(t) given sigma evaluated on a time grid.

    Parameters
    ----------
    t : float or numpy.ndarray
        Time(s) at which to evaluate lambda.
    dsigma_dt : float or numpy.ndarray
        sigma(t) on the same grid.  NOTE(review): despite the name, this
        argument is used as sigma itself (the caller passes the ODE
        *solution*, not its derivative) — confirm against the paper.

    Returns
    -------
    float or numpy.ndarray
        lambda(t), relaxing toward lamb_f_star.
    """
    excess = (lamb_c / lamb_f_star) - 1
    numerator = (lamb_f_star * dsigma_dt) / sigma_f
    prefactor = (lamb_f_star * sigma_i) / (lamb_i_star * sigma_f)
    sigma_ratio = sigma_i / dsigma_dt
    exponent = (lamb_f_star / lamb_c) - 1
    corr_i = 1 + (excess * (sigma_i / gamma))     # correction at sigma_i
    corr_f = 1 + (excess * (sigma_f / gamma))     # correction at sigma_f
    corr_t = 1 + (excess * (dsigma_dt / gamma))   # correction at sigma(t)
    decay = np.exp(-lamb_f_star * t)
    denominator = ((prefactor - (corr_i / corr_f))
                   * (sigma_ratio ** exponent)
                   * decay) + (corr_t / corr_f)
    return numerator / denominator
# Define the time range
time = np.linspace(-2, 5, 100)
# Solve dsigma_dt for all time points
dsigma_dt_solution = solve_func(func, time)
# Analytic growth-rate curve evaluated on the whole grid.
lam_solution = lam_func(time, dsigma_dt_solution)
# Pre-shift (t < 0): hold lambda at its steady-state value lamb_i_star.
# NOTE(review): this overwrite hides, rather than fixes, the jump at t = 0 —
# solve_func integrates from t = -2 with an initial condition that belongs at
# t = 0, so sigma has already drifted by the time the shift happens.  The
# t >= 0 branch therefore starts from the wrong value; confirm the intended
# initial condition against the paper.
lam_solution[time < 0] = lamb_i_star
print(lam_solution)
plt.plot(time, lam_solution)
plt.xlabel('t')
plt.ylabel(r'$\lambda$(t)')
plt.yticks(np.arange(0, 1.2, 0.2))
# Vertical marker at the shift time t = 0.
plt.plot([0, 0], [0, 1], 'k', linewidth=2)
# Dashed reference line at the final growth rate lamb_f_star.
plt.plot([0, time[-1]], [lamb_f_star, lamb_f_star], 'r--')
plt.grid(True)
plt.show()
```