from scipy import *
from numpy import *
def f(x):
    """Return exp(x) + x**2 - 1, elementwise for NumPy array inputs."""
    # exp comes from the file's star import of numpy/scipy.
    return exp(x) + x * x - 1
# Sample f on a fine grid over [-5, 5] and draw the curve.
xs = linspace(-5, 5, num=256, endpoint=True)
plot(xs, f(xs))
show()
from scipy import optimize
# Inspect the bounded scalar minimizer before using it below.
help(optimize.fminbound)
Help on function fminbound in module scipy.optimize.optimize: fminbound(func, x1, x2, args=(), xtol=1e-05, maxfun=500, full_output=0, disp=1) Bounded minimization for scalar functions. Parameters ---------- func : callable f(x,*args) Objective function to be minimized (must accept and return scalars). x1, x2 : float or array scalar The optimization bounds. args : tuple Extra arguments passed to function. xtol : float The convergence tolerance. maxfun : int Maximum number of function evaluations allowed. full_output : bool If True, return optional outputs. disp : int If non-zero, print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. Returns ------- xopt : ndarray Parameters (over given interval) which minimize the objective function. fval : number The function value at the minimum point. ierr : int An error flag (0 if converged, 1 if maximum number of function calls reached). numfunc : int The number of function calls made. Notes ----- Finds a local minimizer of the scalar function `func` in the interval x1 < xopt < x2 using Brent's method. (See `brent` for auto-bracketing).
# Find the minimizer of f on [-5, 5] with Brent's method (bounded search).
m = optimize.fminbound(f, -5, 5)
# Fix: the original `print m` is a Python-2-only statement; print() works on
# Python 3 (and prints identically for a single argument on Python 2).
print(m)
-0.351733622082
# Zoom in around the minimizer and mark it with a red dot.
xs = linspace(-0.5, 0.0, endpoint=True)  # default num=50 sample points
plot(xs, f(xs), color='blue')
plot([m], [f(m)], marker='o', markerfacecolor='red', markeredgecolor='red')
show()
# Build a noisy linear sample around the line y = x + 2.
xs = linspace(-5, 5, 20, endpoint=True)
noise = rand(20) - 0.5  # random vector with values between -0.5 and 0.5
ys = 2 + xs + noise
noise[0:5]  # REPL-style echo of the first five noise values
array([-0.29847372, 0.499184 , -0.07437038, -0.22707507, -0.08418738])
# Fix: linestyle='*' is not a valid matplotlib line style ('*' is a marker
# code); linestyle='None' draws the sample as '^' markers only, as intended.
plot(xs, ys, linestyle='None', marker='^')
xs2 = linspace(-5, 5, 256, endpoint=True)
plot(xs2, 2 + xs2, color='blue')  # the true underlying line y = x + 2
show()
# Least-squares polynomial fit; the third argument is the degree of the
# approximation (1 = straight line), so coefs is [slope, intercept].
coefs = polyfit(xs, ys, 1)
coefs  # REPL-style echo of the fitted coefficients
array([ 1.0133437 , 2.02869257])
# Unpack the fitted slope and intercept, then overlay the fit on the sample.
a, b = coefs[0], coefs[1]
# Fix: linestyle='*' is not a valid matplotlib line style ('*' is a marker
# code); linestyle='None' draws markers only, as intended.
plot(xs, ys, linestyle='None', marker='^', label='echantillon')
plot(xs2, a*xs2+b, color='red', linestyle='--', linewidth=2, label='approximation lineaire')
legend(loc=2)  # loc=2 places the legend in the upper-left corner
show()
def f(x):
    """Objective to approximate: f(x) = exp(x) + x**2 - 1 (array-aware)."""
    squared = x * x
    return exp(x) + squared - 1
# Compare f with its low-degree polynomial approximations.
xs = linspace(-5, 5, 256, endpoint=True)
figure(figsize=(8, 6))
plot(xs, f(xs), label="f(x) = exp(x) + x*x - 1", linewidth=3)
## degree-2 approximation (kept for reference):
#coefs = polyfit(xs, f(xs), 2)
#a, b, c = coefs[0:3]
#plot(xs, a*(xs**2) + b*xs + c, label="approximation de degre 2", linewidth=2, color='green', linestyle='--')
# degree-3 approximation
coefs = polyfit(xs, f(xs), 3)
a, b, c, d = coefs  # polyfit returns exactly degree+1 coefficients
plot(xs, a * xs**3 + b * xs**2 + c * xs + d, label="approximation de degre 3", linewidth=2, color='red', linestyle='--')
legend(loc=9, prop={'size': 14})  # loc=9 = upper center
show()
# Draw random "true" parameters for the power law y = a * x**b.
a = rand()      # 0 <= a < 1
b = 1 + rand()  # 1 <= b < 2

def f(x):
    """Power-law model using the randomly drawn parameters a and b."""
    return a * (x ** b)

xs = linspace(0, 2, 20)
ys = f(xs) + (rand(20) - 0.5)/10  # f(xs) plus noise between -0.05 and 0.05
# Model prototype: p = list of unknown parameters, x = evaluation point.
def fitfunc(p, x):
    """Evaluate the power-law model p[0] * x**p[1] at x."""
    amplitude, exponent = p[0], p[1]
    return amplitude * (x ** exponent)
# Associated error (residual) function used by optimize.leastsq.
def errfunc(p, x, y):
    """Return the residual: observed y minus the model prediction."""
    prediction = fitfunc(p, x)
    return y - prediction
# Initial guess for the unknown parameters (a, b).
pinit = [0.5, 1.5]
# Fit the power-law model to the noisy sample by least squares.
res = optimize.leastsq(errfunc, pinit, args=(xs, ys), full_output=1)
pfinal = res[0]  # best-fit parameter vector; res also holds diagnostics
# Fix: the originals were Python-2-only print statements; print() calls are
# required on Python 3. Runtime strings are kept byte-for-byte.
print('Valeurs réelles :')
print('a = ', a, '\tb = ', b)
print()
print('Valeurs approchées :')
print('a = ', pfinal[0], '\tb = ', pfinal[1])
print()
# Fix: linestyle='*' is not a valid matplotlib line style ('*' is a marker
# code); linestyle='None' draws the sample as '^' markers only.
plot(xs, ys, linestyle='None', marker='^')
plot(xs, fitfunc(pfinal, xs), linestyle='--', linewidth=2, color='red')
show()
Valeurs réelles : a = 0.999350290492 b = 1.56325943317 Valeurs approchées : a = 1.00128508597 b = 1.57056639842