wolfe 算法_最优化课课后作业笔记

来源:互联网 发布:网络专科学历有用吗 编辑:程序博客网 时间:2024/06/08 05:08
"""Wolfe line-search homework demo on the Rosenbrock function.

Starting from Xk = (-1, 1) along the direction Pk = (1, 1), the step
length alpha is bisected/expanded until both Wolfe conditions hold:
  1. sufficient decrease (Armijo):  f(Xk) - f(Xk+a*Pk) >= -mu * a * g(Xk)^T Pk
  2. curvature:                     g(Xk+a*Pk)^T Pk    >= sigma * g(Xk)^T Pk
"""
import numpy as np


def f(X):
    """Rosenbrock function: 100*(x2 - x1**2)**2 + (1 - x1)**2."""
    x1, x2 = X[0], X[1]
    return 100 * (x2 - x1 ** 2) ** 2 + (1 - x1) ** 2


def gradient_(X):
    """Analytic gradient of the Rosenbrock function at X.

    Returns a length-2 numpy array (df/dx1, df/dx2).
    """
    x1, x2 = X[0], X[1]
    return np.array([-400 * (x2 - x1 ** 2) * x1 - 2 * (1 - x1),
                     200 * (x2 - x1 ** 2)])


def wolfe_search(func, grad, Xk, Pk, mu=0.1, sigma=0.5, alpha0=1.0,
                 verbose=False):
    """Find a step length alpha along Pk satisfying the Wolfe conditions.

    Parameters
    ----------
    func, grad : callables for the objective and its gradient.
    Xk, Pk     : current point and search direction (numpy arrays).
    mu, sigma  : Wolfe constants, 0 < mu < sigma < 1.
    alpha0     : initial trial step.
    verbose    : print (iteration, alpha, trial point, trial value) each step.

    Returns
    -------
    float : the accepted step length alpha.
    """
    Fk = func(Xk)
    # g(Xk)^T Pk is loop-invariant (Xk never changes inside the search),
    # so compute it once instead of re-evaluating the gradient per step.
    gk_dot_p = float(np.sum(grad(Xk) * Pk))
    a, b = 0.0, float("inf")  # bracketing interval; inf replaces sys.maxint
    alpha = alpha0
    i = 0
    while True:
        Xk1 = Xk + alpha * Pk
        Fk1 = func(Xk1)
        if verbose:
            print(i, alpha, Xk1, Fk1)
        if Fk - Fk1 < -(mu * alpha * gk_dot_p):
            # Armijo (sufficient decrease) failed: step too long, bisect down.
            b = alpha
            alpha = (alpha + a) * 0.5
        elif float(np.sum(grad(Xk1) * Pk)) < sigma * gk_dot_p:
            # Curvature condition failed: step too short, expand toward b.
            a = alpha
            alpha = min(2 * alpha, (a + b) * 0.5)
        else:
            # Both Wolfe conditions satisfied.
            return alpha
        i += 1  # was the bug `i = 1 + 1` in one branch of the original


if __name__ == "__main__":
    Xk = np.array([-1.0, 1.0])   # starting point
    Pk = np.array([1.0, 1.0])    # search direction
    Fk = f(Xk)
    Gk = gradient_(Xk)
    print('Gk=%s ^T,Fk=%s' % (Gk, Fk))
    print(np.sum(gradient_(Xk) * Pk))
    alpha = wolfe_search(f, gradient_, Xk, Pk, mu=0.1, sigma=0.5,
                         verbose=True)
    Xk1 = Xk + alpha * Pk
    print(alpha, f(Xk1), Xk1)
0 0
原创粉丝点击