Numerical Analysis Python Code Toolkit

# Numerical Analysis Python Code Toolkit

# Generated: 2025-08-13 03:14

# Requirements: Python 3.10+, NumPy (required); SciPy & SymPy (optional but recommended); Matplotlib (optional, for plots)

# Tip: You can copy-paste any cell into a .py file or Jupyter notebook. All examples are self-contained.
Quick Setup

# Install (if needed):
# pip install numpy scipy sympy matplotlib
import numpy as np
# For optional parts:
# from scipy import linalg, integrate, optimize, interpolate
# import sympy as sp
# import matplotlib.pyplot as plt

Note: Installation and base imports. SciPy and SymPy are optional; the core linear algebra uses only NumPy.
Matrix Creation & Basic Operations

import numpy as np
# Create vectors and matrices
a = np.array([1, 2, 3], dtype=float)
b = np.array([4, 5, 6], dtype=float)
A = np.array([[1, 2], [3, 4]], dtype=float)
B = np.array([[5, 6], [7, 8]], dtype=float)
# Shapes and dtypes
print(A.shape, A.dtype)
# Elementwise operations
print("A + B =\n", A + B)
print("A - B =\n", A - B)
print("A * B (elementwise) =\n", A * B)
# Matrix multiplication
print("A @ B =\n", A @ B)
# Transpose, inverse, determinant
print("A.T =\n", A.T)
print("det(A) =", np.linalg.det(A))
print("inv(A) =\n", np.linalg.inv(A))
# Norms
print("||a||_2 =", np.linalg.norm(a))
print("||A||_F =", np.linalg.norm(A, 'fro'))
# Special matrices
I = np.eye(3) # Identity
Z = np.zeros((2,3))
O = np.ones((2,2))
R = np.random.default_rng(0).random((3,3))
print(I, Z.shape, O, "\nRandom:\n", R)
Solving Linear Systems (Ax=b)

import numpy as np
A = np.array([[3., 2., -1.],
              [2., -2., 4.],
              [-1., 0.5, -1.]])
b = np.array([1., -2., 0.])
# Direct solve
x = np.linalg.solve(A, b)
print("Solution x:", x)
# Verify
residual = A @ x - b
print("Residual:", residual)
# If A is singular or ill-conditioned, consider least squares:
x_ls, *_ = np.linalg.lstsq(A, b, rcond=None)
print("Least-squares x:", x_ls)

Note: Prefer np.linalg.solve for square, full-rank A; fall back to lstsq for rank-deficient problems.
Eigenvalues, Eigenvectors, SVD

import numpy as np
A = np.array([[4., 1.],
              [2., 3.]])
# Eigen-decomposition
eigvals, eigvecs = np.linalg.eig(A)
print("Eigenvalues:", eigvals)
print("Eigenvectors (columns):\n", eigvecs)
# Singular Value Decomposition
U, s, Vt = np.linalg.svd(A, full_matrices=False)
print("U=\n", U)
print("s=", s) # Singular values
print("Vt=\n", Vt)
# Low-rank approximation (rank-1)
A1 = (U[:, :1] * s[:1]) @ Vt[:1, :]
print("Rank-1 approximation:\n", A1)

Note: SVD is robust; use it for low-rank approximation and pseudo-inverses.
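As a follow-up to the note, the pseudoinverse can be assembled directly from the SVD factors computed above (in the rank-deficient case you would also drop near-zero singular values) and checked against np.linalg.pinv; a minimal sketch reusing U, s, Vt, and A:

# Pseudoinverse from the SVD: A^+ = V * diag(1/s) * U^T (full-rank case)
A_pinv = Vt.T @ np.diag(1.0 / s) @ U.T
print("Matches np.linalg.pinv:", np.allclose(A_pinv, np.linalg.pinv(A)))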


LU, QR, Cholesky Decompositions

import numpy as np
A = np.array([[2., 1., 1.],
              [4., -6., 0.],
              [-2., 7., 2.]])
# QR decomposition (NumPy)
Q, R = np.linalg.qr(A)
print("Q=\n", Q)
print("R=\n", R)
# Cholesky (SPD matrices only)
S = np.array([[25., 15., -5.],
              [15., 18., 0.],
              [-5., 0., 11.]])
L = np.linalg.cholesky(S)
print("Cholesky L (S = L @ L.T):\n", L)
# LU (via SciPy if available)
try:
    from scipy.linalg import lu
    P, L, U = lu(A)
    print("P=\n", P)
    print("L=\n", L)
    print("U=\n", U)
except Exception:
    print("Install SciPy for LU: pip install scipy, then: from scipy.linalg import lu")

Note: QR helps with least squares; Cholesky is efficient for SPD matrices; LU requires SciPy or your own implementation.
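If SciPy is not available, a bare-bones Doolittle LU factorization (no pivoting, so it assumes the pivots stay nonzero) can serve as a stand-in; this is an educational sketch, and lu_doolittle is just an illustrative name:

def lu_doolittle(A):
    # Doolittle LU without pivoting: A = L @ U with unit diagonal in L
    A = np.asarray(A, dtype=float)
    n = A.shape[0]
    L = np.eye(n)
    U = A.copy()
    for k in range(n - 1):
        for i in range(k + 1, n):
            L[i, k] = U[i, k] / U[k, k]     # assumes U[k, k] != 0
            U[i, k:] -= L[i, k] * U[k, k:]
    return L, U

L_, U_ = lu_doolittle(A)
print("A ~ L @ U:", np.allclose(L_ @ U_, A))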
Polynomial Interpolation (Lagrange & Newton)

import numpy as np
# Lagrange interpolation (simple, O(n^2) per eval)
def lagrange_basis(x_nodes, k, x):
    xk = x_nodes[k]
    mask = np.arange(len(x_nodes)) != k
    terms = (x - x_nodes[mask]) / (xk - x_nodes[mask])
    return np.prod(terms)

def lagrange_interpolate(x_nodes, y_nodes, x):
    x_nodes = np.asarray(x_nodes, dtype=float)
    y_nodes = np.asarray(y_nodes, dtype=float)
    return sum(y_nodes[k] * lagrange_basis(x_nodes, k, x) for k in range(len(x_nodes)))

# Newton interpolation (build divided differences once)
def divided_differences(x, y):
    n = len(x)
    coef = np.array(y, dtype=float).copy()
    for j in range(1, n):
        coef[j:n] = (coef[j:n] - coef[j-1:n-1]) / (x[j:n] - x[0:n-j])
    return coef

def newton_eval(x_data, coef, x):
    total = 0.0
    prod = 1.0
    for c, xd in zip(coef, x_data):
        total += c * prod
        prod *= (x - xd)
    return total
# Example
x_nodes = np.array([0., 1., 2., 3.])
y_nodes = np.array([1., 2., 0., 5.])
coef = divided_differences(x_nodes, y_nodes)
x_test = 1.5
print("Lagrange at 1.5 =", lagrange_interpolate(x_nodes, y_nodes, x_test))
print("Newton at 1.5 =", newton_eval(x_nodes, coef, x_test))

Note: For many points, use the Newton form; for stability, consider barycentric interpolation (SciPy).
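If SciPy is installed, the barycentric form mentioned above is available directly; a short sketch on the same nodes:

try:
    from scipy.interpolate import BarycentricInterpolator
    p = BarycentricInterpolator(x_nodes, y_nodes)
    print("Barycentric at 1.5 =", p(1.5))
except Exception:
    print("Install SciPy for BarycentricInterpolator.")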
Numerical Differentiation (Forward/Backward/Central)

import numpy as np
def f(x):
    return np.sin(x)

def df_forward(f, x, h=1e-5):
    return (f(x + h) - f(x)) / h

def df_backward(f, x, h=1e-5):
    return (f(x) - f(x - h)) / h

def df_central(f, x, h=1e-5):
    return (f(x + h) - f(x - h)) / (2*h)
x0 = 1.0
print("f'(x0) forward ~", df_forward(f, x0))
print("f'(x0) backward ~", df_backward(f, x0))
print("f'(x0) central ~", df_central(f, x0))

Note: The central difference is usually more accurate (O(h^2)). Keep h neither too small nor too large.
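Why h matters in both directions can be seen by sweeping step sizes and comparing against the exact derivative cos(1.0); roughly, the error shrinks with h until roundoff takes over. A small illustrative sketch reusing f, x0, and df_central from above:

exact = np.cos(x0)
for h in [1e-1, 1e-3, 1e-5, 1e-8, 1e-12]:
    err = abs(df_central(f, x0, h) - exact)
    print(f"h={h:.0e}  central-difference error={err:.2e}")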
Numerical Integration (Trapezoidal, Simpson, Composite)

import numpy as np
def f(x):
    return np.exp(-x**2)

def trap(a, b, f):
    return 0.5 * (b - a) * (f(a) + f(b))

def simpson(a, b, f):
    c = 0.5 * (a + b)
    return (b - a) * (f(a) + 4*f(c) + f(b)) / 6

def composite_trap(a, b, n, f):
    x = np.linspace(a, b, n+1)
    y = f(x)
    h = (b - a) / n
    return h * (0.5*y[0] + y[1:-1].sum() + 0.5*y[-1])

def composite_simpson(a, b, n, f):
    if n % 2 == 1:
        n += 1  # Simpson needs an even number of subintervals
    x = np.linspace(a, b, n+1)
    y = f(x)
    h = (b - a) / n
    return h/3 * (y[0] + y[-1] + 4*y[1:-1:2].sum() + 2*y[2:-2:2].sum())
print("Trap [0,1] ~", trap(0,1,f))
print("Simpson [0,1] ~", simpson(0,1,f))
print("Composite Trap n=100 ~", composite_trap(0,1,100,f))
print("Composite Simpson n=100 ~", composite_simpson(0,1,100,f))

Note: Simpson is typically more accurate than trapezoidal for smooth functions.
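As a sanity check, the composite rules can be compared against an adaptive reference value from SciPy's quad, if it is installed:

try:
    from scipy.integrate import quad
    ref, _ = quad(f, 0, 1)
    print("quad reference ~", ref)
    print("Composite Simpson error ~", abs(composite_simpson(0, 1, 100, f) - ref))
except Exception:
    print("Install SciPy for quad.")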
Root Finding (Bisection, Newton, Secant)

import numpy as np
def g(x):
    return x**3 - x - 2

def gprime(x):
    return 3*x**2 - 1

def bisection(f, a, b, tol=1e-8, maxit=100):
    fa, fb = f(a), f(b)
    if fa * fb > 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    for _ in range(maxit):
        c = 0.5*(a+b)
        fc = f(c)
        if abs(fc) < tol or (b-a)/2 < tol:
            return c
        if fa * fc < 0:
            b, fb = c, fc
        else:
            a, fa = c, fc
    return 0.5*(a+b)

def newton(f, fp, x0, tol=1e-10, maxit=50):
    x = x0
    for _ in range(maxit):
        fx = f(x)
        d = fp(x)
        if d == 0:
            raise ZeroDivisionError("Zero derivative")
        x_new = x - fx/d
        if abs(x_new - x) < tol:
            return x_new
        x = x_new
    return x

def secant(f, x0, x1, tol=1e-10, maxit=100):
    for _ in range(maxit):
        f0, f1 = f(x0), f(x1)
        if f1 - f0 == 0:
            return x1
        x2 = x1 - f1*(x1 - x0)/(f1 - f0)
        if abs(x2 - x1) < tol:
            return x2
        x0, x1 = x1, x2
    return x1
print("Bisection on [1,2]:", bisection(g, 1, 2))
print("Newton from 1.5:", newton(g, gprime, 1.5))
print("Secant from 1,2:", secant(g, 1.0, 2.0))

Note: Bisection is robust; Newton is fast but needs the derivative and a good initial guess; the secant method avoids the derivative.
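If SciPy is available, Brent's method (a bracketing hybrid of bisection, secant, and inverse quadratic interpolation) is usually the practical default; a one-line sketch on the same bracket:

try:
    from scipy.optimize import brentq
    print("brentq on [1,2]:", brentq(g, 1, 2))
except Exception:
    print("Install SciPy for brentq.")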
Least Squares (Normal Equations & Polyfit)

import numpy as np
# Data
x = np.array([0., 1., 2., 3., 4.])
y = np.array([1., 2.2, 2.9, 3.7, 5.1])
# Linear model y = a*x + b
X = np.vstack([x, np.ones_like(x)]).T
beta, *_ = np.linalg.lstsq(X, y, rcond=None)
a, b = beta
print(f"y {a:.4f} x + {b:.4f}")
# Polynomial fit (degree 2)
coeff2 = np.polyfit(x, y, 2)
p2 = np.poly1d(coeff2)
print("Quadratic fit coefficients:", coeff2)
print("Predict at x=2.5 ->", p2(2.5))

Note: np.linalg.lstsq solves least squares; polyfit wraps this for polynomial models.
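NumPy's newer polynomial API (numpy.polynomial.Polynomial) wraps the same least-squares fit with better-conditioned internal scaling; a brief sketch with the same data:

from numpy.polynomial import Polynomial
p2_new = Polynomial.fit(x, y, 2)          # fitted in a scaled domain
print("Coefficients (ascending, unscaled):", p2_new.convert().coef)
print("Predict at x=2.5 ->", p2_new(2.5))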
ODE Solvers (Euler & RK4; solve_ivp)

import numpy as np
def dydt(t, y):
    return -2.0*y + np.sin(t)

def euler(f, y0, t0, t1, h):
    t = np.arange(t0, t1 + h, h)
    y = np.zeros_like(t, dtype=float)
    y[0] = y0
    for i in range(len(t) - 1):
        y[i+1] = y[i] + h * f(t[i], y[i])
    return t, y

def rk4(f, y0, t0, t1, h):
    t = np.arange(t0, t1 + h, h)
    y = np.zeros_like(t, dtype=float)
    y[0] = y0
    for i in range(len(t) - 1):
        k1 = f(t[i], y[i])
        k2 = f(t[i] + 0.5*h, y[i] + 0.5*h*k1)
        k3 = f(t[i] + 0.5*h, y[i] + 0.5*h*k2)
        k4 = f(t[i] + h, y[i] + h*k3)
        y[i+1] = y[i] + (h/6.0)*(k1 + 2*k2 + 2*k3 + k4)
    return t, y
t_e, y_e = euler(dydt, y0=1.0, t0=0.0, t1=5.0, h=0.1)
t_r, y_r = rk4(dydt, y0=1.0, t0=0.0, t1=5.0, h=0.1)
print("Euler y[0..5]:", y_e[:6])
print("RK4 y[0..5]:", y_r[:6])
# SciPy (if available):
try:
    from scipy.integrate import solve_ivp
    sol = solve_ivp(dydt, (0, 5), [1.0], method='RK45', max_step=0.1)
    print("solve_ivp t.size:", sol.t.size, " y_last:", sol.y[0, -1])
except Exception:
    print("Install SciPy for solve_ivp (adaptive ODE).")

Note: RK4 is far more accurate than Euler for the same step size. SciPy's solve_ivp adds adaptive step sizing.
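For this linear ODE the exact solution can be written down via the integrating factor, y(t) = 1.2 e^(-2t) + (2 sin t - cos t)/5, so the fixed-step errors can be measured directly; a short check reusing t_e, y_e, t_r, y_r from above:

def y_exact(t):
    # Exact solution of y' = -2y + sin(t), y(0) = 1
    return 1.2*np.exp(-2*t) + (2*np.sin(t) - np.cos(t))/5

print("Euler error at t_end ~", abs(y_e[-1] - y_exact(t_e[-1])))
print("RK4   error at t_end ~", abs(y_r[-1] - y_exact(t_r[-1])))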
Systems of Nonlinear Equations (Newton)

import numpy as np
def F(xy):
    x, y = xy
    return np.array([x**2 + y**2 - 4,   # circle of radius 2
                     x - y - 1])        # line x - y = 1

def J(xy):
    x, y = xy
    return np.array([[2*x, 2*y],
                     [1.0, -1.0]])

def newton_system(F, J, x0, tol=1e-10, maxit=50):
    x = np.array(x0, dtype=float)
    for _ in range(maxit):
        fx = F(x)
        if np.linalg.norm(fx) < tol:
            return x
        delta = np.linalg.solve(J(x), -fx)
        x = x + delta
        if np.linalg.norm(delta) < tol:
            return x
    return x
sol = newton_system(F, J, x0=(1.0, 0.0))
print("Solution (x,y) ~", sol)

Note: Provide the Jacobian for fast convergence. Use finite differences if the analytic Jacobian is hard to derive.
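When the analytic Jacobian is tedious, a forward-difference approximation is a common fallback; a minimal sketch (numeric_jacobian is an illustrative helper name), checked against the analytic J at the computed solution:

def numeric_jacobian(F, x, h=1e-7):
    # Approximate J[i, j] = dF_i/dx_j by forward differences
    x = np.asarray(x, dtype=float)
    F0 = F(x)
    J_num = np.zeros((len(F0), len(x)))
    for j in range(len(x)):
        xp = x.copy()
        xp[j] += h
        J_num[:, j] = (F(xp) - F0) / h
    return J_num

print("FD Jacobian at solution:\n", numeric_jacobian(F, sol))
print("Analytic Jacobian:\n", J(sol))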
Condition Number & Stability

import numpy as np
A = np.array([[1., 1.],
              [1., 1.0001]])
print("cond_2(A) ~", np.linalg.cond(A))
b = np.array([2., 2.0001])
x = np.linalg.solve(A, b)
print("Solution x:", x)
# Small perturbation in b
bp = b.copy()
bp[1] += 1e-4
xp = np.linalg.solve(A, bp)
print("Perturbed solution xp:", xp)
print("Delta x norm:", np.linalg.norm(xp - x))

Note: High condition numbers indicate sensitivity to perturbations (ill-conditioning).
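The textbook bound ||dx||/||x|| <= cond(A) * ||db||/||b|| makes this sensitivity quantitative; a quick check with the numbers above:

rel_dx = np.linalg.norm(xp - x) / np.linalg.norm(x)
rel_db = np.linalg.norm(bp - b) / np.linalg.norm(b)
print("Relative change in x:", rel_dx)
print("Bound cond(A) * relative change in b:", np.linalg.cond(A) * rel_db)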


Floating-Point Pitfalls

# Catastrophic cancellation example
import numpy as np
def naive_expr(x):   # 1 - cos(x), inaccurate for small x
    return 1 - np.cos(x)

def stable_expr(x):
    # Use the identity 1 - cos(x) = 2 sin^2(x/2)
    return 2*np.sin(x/2)**2
x = 1e-8
print("naive:", naive_expr(x))
print("stable:", stable_expr(x))

Note: Prefer algebraically equivalent but numerically stable forms.


Gaussian Elimination with Partial Pivoting (From Scratch)

import numpy as np
def gauss_pp(A, b):
    A = A.astype(float).copy()
    b = b.astype(float).copy()
    n = len(b)
    for k in range(n-1):
        # Partial pivoting: pick the largest |A[i, k]| for i >= k
        pivot = np.argmax(np.abs(A[k:, k])) + k
        if A[pivot, k] == 0:
            raise ValueError("Matrix is singular")
        if pivot != k:
            A[[k, pivot]] = A[[pivot, k]]
            b[[k, pivot]] = b[[pivot, k]]
        # Eliminate entries below the pivot
        for i in range(k+1, n):
            m = A[i, k] / A[k, k]
            A[i, k:] -= m * A[k, k:]
            b[i] -= m * b[k]
    # Back substitution
    x = np.zeros(n)
    for i in range(n-1, -1, -1):
        x[i] = (b[i] - A[i, i+1:] @ x[i+1:]) / A[i, i]
    return x
A = np.array([[2., 1., 1.],
              [4., -6., 0.],
              [-2., 7., 2.]])
b = np.array([5., -2., 9.])
x = gauss_pp(A, b)
print("x =", x)

Note: Educational reference; prefer np.linalg.solve for speed and robustness.


Power Method (Dominant Eigenvalue/Eigenvector)

import numpy as np
def power_method(A, x0=None, tol=1e-10, maxit=1000):
    n = A.shape[0]
    x = np.random.default_rng(0).random(n) if x0 is None else np.array(x0, dtype=float)
    x /= np.linalg.norm(x)
    lam_old = 0.0
    for _ in range(maxit):
        y = A @ x
        x = y / np.linalg.norm(y)
        lam = x @ (A @ x)   # Rayleigh quotient estimate
        if abs(lam - lam_old) < tol:
            return lam, x
        lam_old = lam
    return lam, x
A = np.array([[2., 1.],
              [1., 3.]])
lam, v = power_method(A)
print("Dominant eigenvalue ~", lam)
print("Eigenvector ~", v)

Note: Converges to the largest-magnitude eigenvalue if it is unique and dominant.


Finite Differences for 1D Poisson/BVP

import numpy as np
# Solve y'' = -pi^2 sin(pi x), y(0)=y(1)=0 on [0,1]
# Exact solution: y = sin(pi x)
n = 10
h = 1.0 / (n + 1)
x = np.linspace(h, 1-h, n)
rhs = np.pi**2 * np.sin(np.pi * x)   # equals -y''(x)
# A approximates -d^2/dx^2: tridiagonal (1/h^2) * [-1, 2, -1]
A = np.zeros((n, n))
np.fill_diagonal(A, 2.0)
np.fill_diagonal(A[:-1, 1:], -1.0)
np.fill_diagonal(A[1:, :-1], -1.0)
A /= h**2
y = np.linalg.solve(A, rhs)
print("x:", x)
print("y approx:", y)

Note: Classic second-order centered finite difference scheme for a simple BVP.
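Because the exact solution is known here, the discretization error can be checked directly; it should be small on this grid and shrink roughly like h^2 as n grows:

err = np.max(np.abs(y - np.sin(np.pi * x)))
print("Max error vs sin(pi x):", err)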
Adaptive Composite Simpson (Simple Error Control)

import numpy as np
def simpson_interval(f, a, b):
    c = 0.5*(a+b)
    return (b-a) * (f(a) + 4*f(c) + f(b)) / 6

def adaptive_simpson(f, a, b, tol=1e-8, max_depth=20, depth=0):
    mid = 0.5*(a+b)
    S = simpson_interval(f, a, b)
    S_left = simpson_interval(f, a, mid)
    S_right = simpson_interval(f, mid, b)
    # Accept when the two half-interval estimates agree with the whole-interval one
    if depth >= max_depth or abs(S_left + S_right - S) < 15*tol:
        return S_left + S_right + (S_left + S_right - S)/15
    return adaptive_simpson(f, a, mid, tol/2, max_depth, depth+1) + \
           adaptive_simpson(f, mid, b, tol/2, max_depth, depth+1)

f = lambda x: np.sin(x)/x if x != 0 else 1.0
print("Adaptive Simpson [1e-6, 10] ~", adaptive_simpson(f, 1e-6, 10.0, 1e-8))

Note: Simple recursive adaptive Simpson with error estimate via Richardson extrapolation.
Gradient Descent (Unconstrained Optimization)

import numpy as np
# Minimize f(x,y) = (x-1)^2 + 5(y+2)^2
def f(v):
    x, y = v
    return (x-1)**2 + 5*(y+2)**2

def grad_f(v):
    x, y = v
    return np.array([2*(x-1), 10*(y+2)])

def gradient_descent(x0, alpha=0.1, tol=1e-8, maxit=1000):
    x = np.array(x0, dtype=float)
    for k in range(maxit):
        g = grad_f(x)
        x_new = x - alpha * g
        if np.linalg.norm(x_new - x) < tol:
            return x_new, k+1
        x = x_new
    return x, maxit
xmin, iters = gradient_descent([0, 0], alpha=0.15)  # step must satisfy alpha < 2/L = 0.2 for this quadratic
print("xmin ~", xmin, "iters:", iters)

Note: Fixed-step gradient descent on a convex quadratic. For general problems, use a line search or SciPy optimize.
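If SciPy is available, a quasi-Newton method with a built-in line search avoids tuning the step size by hand; a brief sketch reusing f and grad_f:

try:
    from scipy.optimize import minimize
    res = minimize(f, x0=[0, 0], jac=grad_f, method='BFGS')
    print("scipy minimize ->", res.x, "in", res.nit, "iterations")
except Exception:
    print("Install SciPy for scipy.optimize.minimize.")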
Moore-Penrose Pseudoinverse

import numpy as np
A = np.array([[1., 2., 3.],
              [4., 5., 6.]])
A_pinv = np.linalg.pinv(A)
print("A^+ =\n", A_pinv)
# Check A A^+ A = A
print("Check:", np.allclose(A @ A_pinv @ A, A))

Note: Use pseudoinverse for least squares solutions and for rank-deficient matrices.
FFT-based Convolution (Utility)

import numpy as np
def conv_fft(x, h):
    n = len(x) + len(h) - 1
    N = 1 << (n-1).bit_length()   # next power of two >= n
    X = np.fft.rfft(x, N)
    H = np.fft.rfft(h, N)
    Y = X * H
    y = np.fft.irfft(Y, N)[:n]
    return y
x = np.array([1, 2, 3, 4], dtype=float)
h = np.array([1, -1, 1], dtype=float)
print("Convolution:", conv_fft(x, h))

Note: Handy for polynomial multiplication and smoothing; O(n log n).
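As a sanity check, the result should match NumPy's direct convolution on the same inputs:

print("Matches np.convolve:", np.allclose(conv_fft(x, h), np.convolve(x, h)))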
