3.1 Partial Derivatives and Total Differentials
📐 Definition: Partial Derivative
The partial derivative of a multivariable function f(x, y) with respect to x is: $$\frac{\partial f}{\partial x} = \lim_{h \to 0} \frac{f(x+h, y) - f(x, y)}{h}$$ Differentiate with respect to one variable while keeping the other variables fixed.
The partial derivative of a multivariable function f(x, y) with respect to x is: $$\frac{\partial f}{\partial x} = \lim_{h \to 0} \frac{f(x+h, y) - f(x, y)}{h}$$ Differentiate with respect to one variable while keeping the other variables fixed.
💻 Code Example 1: Numerical Calculation of Partial Derivatives
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def f(x, y):
    """Evaluate the quadratic form f(x, y) = x² + xy + y².

    Works element-wise on NumPy arrays (used with meshgrid below) as
    well as on plain scalars.
    """
    return x**2 + x*y + y**2
def partial_x(f, x, y, h=1e-5):
    """Estimate ∂f/∂x at (x, y) using a central difference of step h."""
    delta = f(x+h, y) - f(x-h, y)
    return delta / (2*h)
def partial_y(f, x, y, h=1e-5):
    """Estimate ∂f/∂y at (x, y) using a central difference of step h."""
    delta = f(x, y+h) - f(x, y-h)
    return delta / (2*h)
# Partial derivatives at point (1, 2)
x0, y0 = 1, 2
df_dx = partial_x(f, x0, y0)
df_dy = partial_y(f, x0, y0)
print(f"Partial derivatives at point ({x0}, {y0}):")
# Analytical derivatives of f = x² + xy + y²: ∂f/∂x = 2x + y, ∂f/∂y = x + 2y.
print(f"∂f/∂x = {df_dx:.6f} (analytical solution: {2*x0 + y0})")
print(f"∂f/∂y = {df_dy:.6f} (analytical solution: {x0 + 2*y0})")
# 3D visualization
# Sample the surface on a 50x50 grid over [-3, 3] × [-3, 3].
x = np.linspace(-3, 3, 50)
y = np.linspace(-3, 3, 50)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, alpha=0.7, cmap='viridis')
# Mark the evaluation point (x0, y0, f(x0, y0)) on the surface.
ax.scatter([x0], [y0], [f(x0, y0)], color='red', s=100)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('f(x,y)')
ax.set_title('f(x,y) = x² + xy + y²')
plt.show()
💻 Code Example 2: Partial Derivatives Using SymPy
import sympy as sp

x, y, z = sp.symbols('x y z')

# Partial derivatives of various multivariable functions
f1 = x**2 + y**2
f2 = x*sp.exp(y)
f3 = sp.sin(x*y)
f4 = x**2 * y + y**2 * z + z**2 * x

functions = [f1, f2, f3, f4]
print("Examples of partial derivatives:")
for f in functions:
    print(f"\nf = {f}")
    # free_symbols is a set, so its iteration order is not deterministic;
    # sort by name so the output is stable across runs.  (Also avoid naming
    # the list `vars`, which would shadow the builtin.)
    for var in sorted(f.free_symbols, key=lambda s: s.name):
        print(f" ∂f/∂{var} = {sp.diff(f, var)}")
3.2 Gradient and Jacobian Matrix
📐 Definition: Gradient
The gradient of scalar field f(x, y, z) is a vector with partial derivatives as components: $$\nabla f = \left( \frac{\partial f}{\partial x}, \frac{\partial f}{\partial y}, \frac{\partial f}{\partial z} \right)$$ The gradient vector points in the direction of steepest increase of the function.
The gradient of scalar field f(x, y, z) is a vector with partial derivatives as components: $$\nabla f = \left( \frac{\partial f}{\partial x}, \frac{\partial f}{\partial y}, \frac{\partial f}{\partial z} \right)$$ The gradient vector points in the direction of steepest increase of the function.
💻 Code Example 3: Implementation of Gradient Descent Method
def gradient(f, x, y, h=1e-5):
    """Approximate ∇f at (x, y) with central differences; returns a 2-vector."""
    df_dx = (f(x+h, y) - f(x-h, y)) / (2*h)
    df_dy = (f(x, y+h) - f(x, y-h)) / (2*h)
    return np.array([df_dx, df_dy])
def gradient_descent(f, x0, y0, learning_rate=0.1, n_iter=50):
    """Minimize f by plain gradient descent from (x0, y0).

    Returns the array of visited points, shape (n_iter + 1, 2), starting
    with the initial point.
    """
    x, y = x0, y0
    path = [(x, y)]
    for _ in range(n_iter):
        step = gradient(f, x, y)
        x = x - learning_rate * step[0]
        y = y - learning_rate * step[1]
        path.append((x, y))
    return np.array(path)
# Search for minimum of f(x,y) = x² + y²
f_simple = lambda x, y: x**2 + y**2
# 20 descent steps from (3, 2); the unique minimum is at the origin.
path = gradient_descent(f_simple, x0=3, y0=2, learning_rate=0.2, n_iter=20)
print("Minimum search using gradient descent:")
print(f"Starting point: ({path[0,0]:.2f}, {path[0,1]:.2f})")
print(f"Ending point: ({path[-1,0]:.6f}, {path[-1,1]:.6f})")
print(f"Minimum value: f = {f_simple(path[-1,0], path[-1,1]):.6f}")
# Visualization
# Contour plot of the objective with the descent path overlaid.
x = np.linspace(-3, 3, 100)
y = np.linspace(-3, 3, 100)
X, Y = np.meshgrid(x, y)
Z = f_simple(X, Y)
plt.figure(figsize=(10, 8))
plt.contour(X, Y, Z, levels=20, cmap='viridis', alpha=0.6)
plt.colorbar(label='f(x,y)')
plt.plot(path[:,0], path[:,1], 'ro-', linewidth=2, markersize=6)
plt.scatter([path[0,0]], [path[0,1]], color='green', s=150, marker='*', label='Starting point')
plt.scatter([path[-1,0]], [path[-1,1]], color='red', s=150, marker='*', label='Ending point')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Optimization Using Gradient Descent')
plt.legend()
plt.grid(True, alpha=0.3)
plt.axis('equal')
plt.show()
3.3 Lagrange Multiplier Method
📐 Theorem: Lagrange Multiplier Method
The problem of optimizing f(x, y) under constraint g(x, y) = 0 is reduced to finding stationary points of the Lagrange function L(x, y, λ) = f(x, y) - λg(x, y).
The problem of optimizing f(x, y) under constraint g(x, y) = 0 is reduced to finding stationary points of the Lagrange function L(x, y, λ) = f(x, y) - λg(x, y).
💻 Code Example 4: Constrained Optimization Problem
from scipy.optimize import minimize
# Problem: minimize x² + y² subject to x + y = 1
def objective(X):
    """Objective to minimize: f(x, y) = x² + y² for X = (x, y)."""
    a, b = X
    return a**2 + b**2
def constraint(X):
    """Equality constraint g(X) = x + y - 1; zero exactly when x + y = 1."""
    first, second = X
    return first + second - 1
# Specify constraint in dictionary format
# 'eq' tells the solver to drive constraint(X) to exactly zero.
cons = {'type': 'eq', 'fun': constraint}
# Execute optimization
x0 = [0, 0] # Initial value
# SLSQP is used because it supports equality constraints.
result = minimize(objective, x0, method='SLSQP', constraints=cons)
print("Results of constrained optimization:")
print(f"Optimal solution: x = {result.x[0]:.6f}, y = {result.x[1]:.6f}")
print(f"Minimum value: f = {result.fun:.6f}")
print(f"Constraint verification: x + y = {result.x[0] + result.x[1]:.6f}")
# Comparison with analytical solution (analytical solution for this problem is x = y = 0.5)
print(f"\nAnalytical solution: x = y = 0.5, f = 0.5")
print(f"Error: {abs(result.x[0] - 0.5):.2e}")
3.4 Multiple Integrals (Double Integrals, Triple Integrals)
💻 Code Example 5: Numerical Calculation of Double Integrals
from scipy import integrate
# ∫∫_D xy dxdy over D: 0 ≤ x ≤ 1, 0 ≤ y ≤ 2
def integrand(y, x):
    """Integrand f(x, y) = x·y; scipy's dblquad passes arguments as (y, x)."""
    return y * x
# dblquad: double integral
# dblquad(func, a, b, gfun, hfun) integrates func(y, x) with x in [a, b]
# (outer) and y in [gfun, hfun] (inner); constants are accepted for the
# inner bounds.
result, error = integrate.dblquad(integrand,
0, 1, # x range
0, 2) # y range
print("Double integral calculation:")
print(f"∫₀¹ ∫₀² xy dy dx = {result:.6f}")
print(f"Analytical solution: [x²/2]₀¹ · [y²/2]₀² = 0.5 · 2 = 1.0")
print(f"Estimated error: {error:.2e}")
💻 Code Example 6: Double Integral Using Polar Coordinate Transformation
# ∫∫ (x² + y²) dxdy over the unit disk x² + y² ≤ 1
# Polar coordinate transformation: x = r cos θ, y = r sin θ, dxdy = r dr dθ
def integrand_polar(theta, r):
    """Polar-coordinate integrand: (x² + y²)·(Jacobian r) = r²·r = r³."""
    return r**3
# Integration range: 0 ≤ r ≤ 1, 0 ≤ θ ≤ 2π
# Outer variable r runs over [0, 1]; inner variable θ over [0, 2π].
result, error = integrate.dblquad(integrand_polar,
0, 1, # r range
0, 2*np.pi) # θ range
print("\nDouble integral using polar coordinate transformation:")
print(f"∫∫_D (x² + y²) dxdy = {result:.6f}")
print(f"Analytical solution: ∫₀^2π dθ ∫₀¹ r³ dr = 2π · [r⁴/4]₀¹ = π/2 ≈ {np.pi/2:.6f}")
print(f"Error: {abs(result - np.pi/2):.2e}")
💻 Code Example 7: Application to Materials Science (Concentration Distribution Integration)
# Impurity concentration distribution on circular wafer
def concentration(r, theta):
    """
    Impurity concentration C(r, θ) [atoms/cm³] on a circular wafer.

    Gaussian-like decay with distance r from the wafer center.  The theta
    argument is unused because the profile is radially symmetric; it is
    kept so the function has a full polar-coordinate interface.
    """
    wafer_radius = 5.0  # Wafer radius [cm]
    center_conc = 1e15  # Concentration at the center [atoms/cm³]
    return center_conc * np.exp(-r**2 / wafer_radius**2)
# Calculate total impurity atoms in entire wafer
R = 5.0  # Wafer radius [cm]

def integrand_concentration(theta, r):
    """Concentration times the polar-coordinate Jacobian r (for dblquad)."""
    return r * concentration(r, theta)
# Integrate C(r, θ)·r over the full disk: r in [0, R], θ in [0, 2π].
total_atoms, error = integrate.dblquad(integrand_concentration,
0, R,
0, 2*np.pi)
print("\nApplication to materials science:")
print(f"Wafer radius: {R} cm")
print(f"Center concentration: {1e15:.2e} atoms/cm³")
print(f"Total impurity atoms: {total_atoms:.4e} atoms")
# Average concentration = total atoms / wafer area (πR²).
print(f"Average concentration: {total_atoms / (np.pi * R**2):.4e} atoms/cm³")
# Visualization of concentration distribution
# Build a (r, θ) grid, evaluate C on it, then map to Cartesian for plotting.
r_plot = np.linspace(0, R, 100)
theta_plot = np.linspace(0, 2*np.pi, 100)
R_grid, Theta_grid = np.meshgrid(r_plot, theta_plot)
C_grid = concentration(R_grid, Theta_grid)
# Convert polar to Cartesian coordinates
X_grid = R_grid * np.cos(Theta_grid)
Y_grid = R_grid * np.sin(Theta_grid)
plt.figure(figsize=(10, 8))
plt.contourf(X_grid, Y_grid, C_grid, levels=20, cmap='hot')
plt.colorbar(label='Concentration (atoms/cm³)')
plt.xlabel('x (cm)')
plt.ylabel('y (cm)')
plt.title('Impurity Concentration Distribution on Wafer')
plt.axis('equal')
plt.grid(True, alpha=0.3)
plt.show()
Summary
- Partial derivatives represent the rate of change of multivariable functions with respect to each variable
- The gradient vector indicates the direction of steepest increase and is utilized in optimization
- Constrained optimization problems can be solved using Lagrange multipliers
- Multiple integrals calculate accumulated quantities of multivariable functions, often simplified using polar coordinate transformations
- In materials science, they are applied to integration calculations of concentration distributions, temperature distributions, etc.