scipy.optimize.least_squares() runs 5 times and gives back the initial guess every time


I am running least_squares to fit my model against measured values. I have created a cost function and all of the function arguments seem to be fine, but the result is exactly the same as the initial guess I provided; it doesn't deviate at all.

Thank you for your help in advance.

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares


def LSMM(initial_guess, ocp_cathode, ocp_anode, soc_values, soc, ocv_entries):

    def ocp_pe(alpha_p, beta_p, ocp_cathode, soc_values):
        # Map cathode OCP values onto the rescaled SoC axis (alpha_p*soc + beta_p), rounded to 3 decimals
        ocp_p = {k_soc: v for k_soc, v in zip(np.around(alpha_p*soc_values + beta_p, 3), ocp_cathode)}
        return ocp_p

    def ocp_ne(alpha_n, beta_n, ocp_anode, soc_values):
        # Same mapping for the anode OCP
        ocp_n = {k_soc: v for k_soc, v in zip(np.round(alpha_n*soc_values + beta_n, 3), ocp_anode)}
        return ocp_n
    
    def ocv_cell(soc, ocv_datapoints):
        # Measured full-cell OCV keyed by rounded SoC
        ocv = {k_soc: v for k_soc, v in zip(np.round(soc, 3), ocv_datapoints)}
        return ocv

    def cost_function(params, ocp_cathode, ocp_anode, soc_values, soc, ocv_datapoints):
        print("here")
        alpha_p, beta_p, alpha_n, beta_n = params
        print(params)
        ocp_p = ocp_pe(alpha_p, beta_p, ocp_cathode, soc_values)
        ocp_n = ocp_ne(alpha_n, beta_n, ocp_anode, soc_values)
        ocv = ocv_cell(soc, ocv_datapoints)
        ocv_cat = []
        ocv_an = []
        for i in ocv.keys():
            ocv_cat.append(ocp_p[i])
            ocv_an.append(ocp_n[i])
        
        ocv_pred = np.array(ocv_cat) - np.array(ocv_an)
        # Scalar cost: square root of the summed squared errors (a single number, not a residual vector)
        diff = np.sqrt(np.sum((ocv_pred - np.array(list(ocv.values()))) ** 2))
        #return np.sqrt(np.mean((ocv_pred - np.array(list(ocv.values()))) ** 2))
        return diff

    opti = least_squares(cost_function, initial_guess, args=(ocp_cathode, ocp_anode, soc_values, soc, ocv_entries), method='trf')
    alpha_p, beta_p, alpha_n, beta_n = opti.x
    jac = opti.grad
    #stat = opti.status
    print("alpha p: ", alpha_p)
    print("beta p: ", beta_p)
    print("alpha n: ", alpha_n)
    print("beta n: ", beta_n)
    print(jac)
    o_p = ocp_pe(alpha_p, beta_p, ocp_cathode, soc_values)
    o_n = ocp_ne(alpha_n, beta_n, ocp_anode, soc_values)
    o = ocv_cell(soc, ocv_entries)
    ocv_cat = []
    ocv_an = []
    for j in o.keys():
        ocv_cat.append(o_p[j])
        ocv_an.append(o_n[j])

    ocv_pred = np.array(ocv_cat) - np.array(ocv_an)

    plt.figure(figsize=(10, 6))
    plt.plot(list(o_p.keys()), list(o_p.values()), label="Cathode")
    plt.plot(list(o_n.keys()), list(o_n.values()), label="Anode")
    plt.plot(list(o.keys()), ocv_pred, label="OCV Predicted")
    plt.plot(list(o.keys()), list(o.values()), label ="OCV Measured")
    plt.xlabel("State of Charge (SoC)")
    plt.ylabel("Open Circuit Potential (OCP) [V]")
    plt.title("Open Circuit Potential")
    plt.grid(True)
    plt.legend()
    plt.show()

initial_guess = np.array([1.2,-0.1,1.2,-0.1])
LSMM(initial_guess, ocp_cathode, ocp_anode, soc_values, soc, ocv_entries) 


I have tried various other initial guesses, and the result is always the same as the guess I passed in.

1 Answer

Answer by Askold Ilvento:

Your cost function returns a scalar loss, but least_squares expects a vector of residuals, one entry per data point, which it squares and sums internally. (The five evaluations you see are just the initial call plus one finite-difference call per parameter for the Jacobian estimate.) diff should therefore be the residual vector, e.g.:

diff = ocv_pred - np.array(list(ocv.values()))
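
For illustration only, here is a minimal sketch of cost_function with that change, reusing the helper functions and variable names from the question (and assuming, as the original code does, that ocp_p, ocp_n and ocv share the same rounded SoC keys):

    def cost_function(params, ocp_cathode, ocp_anode, soc_values, soc, ocv_datapoints):
        alpha_p, beta_p, alpha_n, beta_n = params
        ocp_p = ocp_pe(alpha_p, beta_p, ocp_cathode, soc_values)
        ocp_n = ocp_ne(alpha_n, beta_n, ocp_anode, soc_values)
        ocv = ocv_cell(soc, ocv_datapoints)
        # One predicted OCV value per measured SoC point
        ocv_pred = np.array([ocp_p[k] - ocp_n[k] for k in ocv.keys()])
        # Return the residual vector; least_squares squares and sums it internally
        return ocv_pred - np.array(list(ocv.values()))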

You can also apply a specific loss to the residuals via the loss parameter of scipy.optimize.least_squares(fun, x0, loss=...); the available variants are listed in the SciPy docs.
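
For example (an untested sketch with the same call arguments as in the question, using the built-in 'soft_l1' robust loss):

    opti = least_squares(cost_function, initial_guess,
                         args=(ocp_cathode, ocp_anode, soc_values, soc, ocv_entries),
                         method='trf', loss='soft_l1')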

If you prefer to work with a scalar loss function directly, I would advise using scipy.optimize.minimize instead.
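
A rough sketch of that alternative, keeping the scalar cost_function exactly as you wrote it (Nelder-Mead is chosen here only because it does not rely on gradients):

    from scipy.optimize import minimize

    # Minimize the scalar cost directly instead of supplying residuals
    res = minimize(cost_function, initial_guess,
                   args=(ocp_cathode, ocp_anode, soc_values, soc, ocv_entries),
                   method='Nelder-Mead')
    alpha_p, beta_p, alpha_n, beta_n = res.x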