Skip to content

Commit 124b279

Browse files
authored
Add files via upload
1 parent 4d99672 commit 124b279

File tree

7 files changed

+434
-0
lines changed

7 files changed

+434
-0
lines changed

armijo.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
import numpy as np
2+
from scipy import optimize as opt
3+
4+
"""
5+
Armijo linesearch Algorithm
6+
"""
7+
8+
def schrittweite(f, fd, xk, d, sigma, rho, gamma):
    """Armijo backtracking line search.

    Starts from the trial step t = -gamma * <fd(xk), d> / ||d||^2 and
    shrinks it by the factor rho until the sufficient-decrease (Armijo)
    condition  f(xk + t*d) <= f(xk) + t * sigma * <fd(xk), d>  holds.

    Parameters
    ----------
    f : callable      objective, point -> float
    fd : callable     gradient of f, point -> ndarray
    xk : ndarray      current iterate
    d : ndarray       search direction; must be non-zero (||d|| == 0
                      would divide by zero, as in the original)
    sigma : float     sufficient-decrease parameter in (0, 1)
    rho : float       backtracking factor in (0, 1)
    gamma : float     scaling of the initial trial step

    Returns
    -------
    float : accepted step size t
    """
    dnorm = (np.linalg.norm(d)) ** 2
    # Gradient and objective at xk are invariant inside the backtracking
    # loop — evaluate them once (the original re-evaluated both per pass).
    gk = fd(xk)
    fxk = f(xk)
    slope = np.dot(gk, d)
    t = -gamma * (slope / dnorm)
    # NOTE(review): if d is not a descent direction (slope >= 0) this loop
    # may not terminate — same behavior as the original implementation.
    while f(xk + t * d) > fxk + t * sigma * slope:
        t = rho * t

    return t

bfgs.py

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
import numpy as np
2+
from numpy.linalg import inv
3+
from scipy import optimize as opt
4+
import math
5+
import armijo
6+
7+
class BFGS(object):
    """
    BFGS quasi-Newton method with full steps (no line search).

    Maintains an approximation E of the Hessian, updated with the
    standard BFGS formula
        E <- E + (y y^T)/(y.s) - (E s s^T E)/(s^T E s).
    """

    def __init__(self, f, fd, H, xk, eps):
        """Store objective f, gradient fd, Hessian H (unused by this
        method), start point xk and gradient-norm tolerance eps."""
        self.fd = fd
        self.H = H
        self.xk = xk
        self.eps = eps
        self.f = f
        return

    def work(self):
        """Run BFGS until ||fd(xk)|| <= eps or maxit iterations.

        Returns
        -------
        (ndarray, int) : final iterate and iteration count
        """
        f = self.f
        fd = self.fd
        xk = self.xk
        eps = self.eps
        # Initial matrix for BFGS: identity of matching dimension.
        # Generalized from the original hard-coded 2x2 identity so the
        # solver works for any problem size (backward compatible in 2-D).
        E = np.eye(len(xk))
        xprev = xk
        gprev = fd(xprev)
        it = 0
        maxit = 10000

        while (np.linalg.norm(gprev) > eps) and (it < maxit):
            # Quasi-Newton step: solve E p = grad rather than forming
            # inv(E) explicitly (cheaper, numerically more stable).
            xk = xprev - np.linalg.solve(E, gprev)
            gk = fd(xk)
            sk = xk - xprev
            yk = gk - gprev

            ys = np.dot(yk, sk)
            # Skip the curvature update when y.s (or s^T E s) vanishes;
            # the original divided by zero in that case.
            if abs(ys) > 1e-12:
                Esk = E @ sk
                sEs = np.dot(sk, Esk)
                if abs(sEs) > 1e-12:
                    # E is kept symmetric, so E s s^T E == outer(Es, Es).
                    E = E + np.outer(yk, yk) / ys - np.outer(Esk, Esk) / sEs

            xprev = xk
            gprev = gk
            fx = f(xk)
            # Guarded logging: f(xk) can hit 0.0 exactly at the minimum,
            # which made the original math.log10 call raise ValueError.
            print("Log-Values(BFGS): ", math.log10(fx) if fx > 0 else -math.inf)
            it += 1

        return xk, it

cg.py

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
import numpy as np
2+
from numpy.linalg import inv
3+
from scipy import optimize as opt
4+
import math
5+
import armijo
6+
7+
class CGMethod(object):
    """
    Nonlinear conjugate gradient method (Fletcher-Reeves) with Armijo
    backtracking line search for the step size.
    """

    def __init__(self, f, fd, H, xk, eps):
        """Store objective f, gradient fd, Hessian H (unused by this
        method), start point xk and gradient-norm tolerance eps."""
        self.fd = fd
        self.f = f
        self.H = H
        self.xk = xk
        self.eps = eps
        return

    def work(self):
        """Iterate x_{k+1} = x_k + t*d_k with Fletcher-Reeves direction
        updates d_{k+1} = -g_{k+1} + (||g_{k+1}||^2 / ||g_k||^2) * d_k
        until ||fd(xk)|| <= eps or maxit iterations.

        Returns
        -------
        (ndarray, int) : final iterate and iteration count
        """
        f = self.f
        fd = self.fd
        xk = self.xk
        eps = self.eps
        it = 0
        # Safety bound restored (it was commented out): prevents an
        # endless loop when the tolerance is never reached.  The caller
        # (Rosenb.algo) re-invokes work() until convergence anyway.
        maxit = 10000
        xprev = self.xk
        # Gradient evaluated once per point instead of repeatedly.
        gprev = fd(xprev)
        dprev = -gprev

        while (np.linalg.norm(gprev) > eps) and (it < maxit):
            t = armijo.schrittweite(f, fd, xprev, dprev, sigma=0.02, rho=0.5, gamma=2)
            xk = xprev + t * dprev
            gk = fd(xk)
            normprev = np.dot(gprev, gprev)  # ||g_k||^2
            normk = np.dot(gk, gk)           # ||g_{k+1}||^2
            dprev = -gk + (normk / normprev) * dprev
            xprev = xk
            gprev = gk
            fx = f(xk)
            # Guarded: math.log10 raises ValueError when f(xk) == 0.
            print("Log-Values(CG): ", math.log10(fx) if fx > 0 else -math.inf)
            it += 1

        return xk, it

gradv.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
import numpy as np
2+
from scipy import optimize as opt
3+
import armijo
4+
import math
5+
6+
7+
8+
class GradientMethod(object):
    """
    Steepest-descent method with Armijo backtracking line search.
    """

    def __init__(self, f, fd, H, xk, eps):
        """Store objective f, gradient fd, Hessian H (unused by this
        method), start point xk and gradient-norm tolerance eps."""
        self.xk = xk
        self.eps = eps
        self.fd = fd
        self.f = f
        self.H = H
        return

    def work(self):
        """Iterate xk <- xk - t * fd(xk) until ||fd(xk)|| <= eps or
        maxit iterations, with t from the Armijo line search.

        Returns
        -------
        (ndarray, int) : final iterate and iteration count
        """
        f = self.f
        xk = self.xk
        eps = self.eps
        fd = self.fd
        it = 0
        maxit = 2000
        # Evaluate the gradient once per iteration (the original called
        # fd(xk) four times per loop pass: condition, search direction,
        # and update).
        gk = fd(xk)
        while (np.linalg.norm(gk) > eps) and (it < maxit):
            t = armijo.schrittweite(f, fd, xk, -gk, sigma=0.02, rho=0.5, gamma=0.0001)
            xk = xk - t * gk
            fx = f(xk)
            # Guarded: math.log10 raises ValueError when f(xk) == 0.
            print("Log-Values(Gradient): ", math.log10(fx) if fx > 0 else -math.inf)
            it += 1
            gk = fd(xk)
        return xk, it

newtonm.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
import numpy as np
2+
from numpy.linalg import inv
3+
from scipy import optimize as opt
4+
import math
5+
6+
7+
class NewtonMethod(object):
    """
    Newton's method with full steps: xk <- xk - H(xk)^{-1} fd(xk).
    """

    def __init__(self, f, fd, H, xk, eps):
        """Store objective f, gradient fd, Hessian H, start point xk
        and gradient-norm tolerance eps."""
        self.fd = fd
        self.H = H
        self.xk = xk
        self.eps = eps
        self.f = f
        return

    def work(self):
        """Run Newton iterations until ||fd(xk)|| <= eps or maxit.

        Returns
        -------
        (ndarray, int) : final iterate and iteration count
        """
        f = self.f
        fd = self.fd
        H = self.H
        xk = self.xk
        eps = self.eps
        it = 0
        # Safety bound restored (it was commented out): prevents an
        # endless loop if the iteration never reaches the tolerance.
        maxit = 10000

        gk = fd(xk)
        while (np.linalg.norm(gk) > eps) and (it < maxit):
            # Solve H(xk) p = fd(xk) instead of forming the explicit
            # inverse: cheaper and numerically more stable.
            xk = xk - np.linalg.solve(H(xk), gk)
            it += 1
            fx = f(xk)
            # Guarded: the original math.log10(f(xk)) raised ValueError
            # whenever an iterate hit the exact minimum (f == 0) — which
            # happens in one step on a quadratic objective.
            print("Log-Values(Newton): ", math.log10(fx) if fx > 0 else -math.inf)
            gk = fd(xk)

        return xk, it

ros_test.py

Lines changed: 117 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,117 @@
1+
from gradv import GradientMethod
2+
import numpy as np
3+
import time
4+
from newtonm import NewtonMethod
5+
from numpy.linalg import inv
6+
from cg import CGMethod
7+
from bfgs import BFGS
8+
import armijo
9+
10+
11+
12+
class Rosenb:
    """
    Driver that selects one of the solvers (gradient, Newton, CG, BFGS)
    and runs it until the gradient norm falls below the tolerance.
    """

    def algo(self, fo, alsolver):
        """Minimize the problem bundle *fo* with the solver named by
        *alsolver* ('gradv', 'newtonm', 'cg' or 'bfgs').

        Returns the final iterate and the last solver's iteration count.
        """
        f, fd, H = fo.f, fo.fd, fo.H
        x = fo.x0
        eps = 0.0001
        it = None
        # Dispatch table replaces the original if/elif chain.
        constructors = {
            'gradv': GradientMethod,    # gradient-descent solver
            'newtonm': NewtonMethod,
            'cg': CGMethod,
            'bfgs': BFGS,
        }
        # Re-invoke the solver until convergence (a solver may return
        # early when it hits its internal iteration cap).
        while np.linalg.norm(fd(x)) > eps:
            make_solver = constructors.get(alsolver)
            if make_solver is None:
                print("ERROR, NO SOLVER SELECTED. EXITING")
                exit()
            x, it = make_solver(f, fd, H, x, eps).work()

        return x, it
45+
46+
"""
47+
Create Function that should be minimized
48+
"""
49+
class Function(object):
    """
    Bundle of a test problem: objective f, gradient fd, Hessian H and
    start point x0.
    """
    # Class-level defaults (kept for backward compatibility with code
    # that may read them off the class).
    x0 = None
    f = None
    fd = None
    H = None

    def __init__(self, x0, f, fd, H):
        """Store start point x0 and the three callables f, fd, H."""
        self.x0 = x0
        self.f = f
        self.fd = fd
        self.H = H

    @staticmethod
    def testFunctionRosenbrock():
        """Rosenbrock-type function 100*(x - y^2)^2 + (1 - x)^2 with its
        analytic gradient and Hessian; minimum at (1, 1).

        Declared @staticmethod (fix): the original bare defs worked only
        when called via the class, and broke when called on an instance.
        """
        f = lambda xy: (10*(xy[0] - xy[1]**2))**2 + (1-xy[0])**2
        fd = lambda xy: np.array([202.*xy[0] - 200*xy[1]**2 - 2, -400*xy[1]*(xy[0] - xy[1]**2)])
        H = lambda xy: np.array([ [202., -400.*xy[1] ],
                                  [-400.*xy[1], 800.*xy[1]**2 - 400.*(xy[0] - xy[1]**2) ]
                                ])
        x0 = np.array([-2., 2.])
        return Function(x0, f, fd, H)

    @staticmethod
    def testFunction2():
        """Simple quadratic (x, y) -> (x + 3)^2 + y^2; minimum at (-3, 0)."""
        f = lambda xy: (xy[0] + 3)**2 + xy[1]**2
        fd = lambda xy: np.array([2*xy[0] + 6, 2*xy[1]])
        H = lambda xy: np.array([[2., 0.], [0., 2.]])
        x0 = np.array([3., 0.])
        return Function(x0, f, fd, H)
81+
82+
83+
# Smoke-test driver: run each solver on the test problems and check that
# the returned minimizer matches the known solution.
print("STARTING TESTS")
a = Rosenb()
# Quadratic-function test kept disabled as a block string (a no-op
# expression statement at module level).
"""
print("-----Testing quadratic(2) function-----")
f = Function.testFunction2()
x_gv, it_gv = a.algo(f,'gradv')
x_nm, it_nm = a.algo(f, 'newtonm')
x_cg, it_cg = a.algo(f, 'cg')
#x_bfgs, it_bfgs = a.algo(f, 'bfgs')
print("Result (Grad): ",x_gv, " steps: ", it_gv)
print("Result (Newton): ",x_nm, " steps: ", it_nm)
print("Result (CG): ",x_cg, "steps: ", it_cg)
#print("Result (BFGS): ",x_bfgs, "steps: ", it_bfgs)
assert np.allclose(x_gv, np.array([-3.,0.]), rtol=1e-04, atol=1e-05)
assert np.allclose(x_nm, np.array([-3.,0.]), rtol=1e-04, atol=1e-05)
assert np.allclose(x_cg, np.array([-3.,0.]), rtol=1e-04, atol=1e-05)
#assert np.allclose(x_bfgs, np.array([-3.,0.]), rtol=1e-04, atol=1e-05)
print(" + Pass")
"""

# Run all four solvers on the Rosenbrock problem; each should converge
# to the known minimizer (1, 1).
print("-----Testing Rosenbrock function-----")
f = Function.testFunctionRosenbrock()
x_gv, it_gv = a.algo(f,'gradv')
x_nm, it_nm = a.algo(f, 'newtonm')
x_cg, it_cg = a.algo(f, 'cg')
x_bfgs, it_bfgs = a.algo(f, 'bfgs')
print("Result (Grad): ",x_gv, " steps: ", it_gv)
print("Result (Newton): ",x_nm, " steps: ", it_nm)
print("Result (CG): ",x_cg, "steps: ", it_cg)
print("Result (BFGS): ",x_bfgs, "steps: ", it_bfgs)
# NOTE(review): `assert` is stripped under `python -O`; acceptable for a
# manual smoke test, but real validation should raise explicitly.
assert np.allclose(x_gv, np.array([1.,1.]), rtol=1e-04, atol=1e-05)
assert np.allclose(x_nm, np.array([1.,1.]), rtol=1e-04, atol=1e-05)
assert np.allclose(x_cg, np.array([1.,1.]), rtol=1e-04, atol=1e-05)
assert np.allclose(x_bfgs, np.array([1.,1.]), rtol=1e-04, atol=1e-05)
print(" + Pass")

0 commit comments

Comments
 (0)