| name | python-profiling |
| description | Profile and benchmark Python code to measure execution time and identify bottlenecks. Use when analyzing performance, comparing implementations, or diagnosing slow code. |
Python Profiling
Basic Timing
import time
start = time.perf_counter()
result = my_function(data)
elapsed = time.perf_counter() - start
print(f"Time: {elapsed:.4f}s")
Benchmarking
import timeit
time_taken = timeit.timeit(
    lambda: my_function(data),
    number=100
)
print(f"Average: {time_taken / 100:.6f}s")
Function Profiling
import cProfile
import pstats

profiler = cProfile.Profile()
profiler.enable()
result = my_function(data)
profiler.disable()

stats = pstats.Stats(profiler)
stats.sort_stats('cumulative').print_stats(20)  # Top 20 entries by cumulative time
CLI: python -m cProfile -s cumulative script.py
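To keep results for later inspection, cProfile can write to a file that pstats reloads; a sketch, where the filename profile.out is arbitrary:
CLI: python -m cProfile -o profile.out script.py
import pstats
stats = pstats.Stats('profile.out')
stats.sort_stats('cumulative').print_stats(20)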
Numba-Specific
import numpy as np
from numba import njit, prange

@njit
def my_func(arr):
    return arr.sum()

my_func(np.array([1, 2, 3]))   # First call triggers JIT compilation
print(my_func.signatures)      # View compiled type signatures

# Parallel diagnostics
@njit(parallel=True)
def parallel_func(arr):
    result = 0.0
    for i in prange(len(arr)):
        result += arr[i]
    return result

parallel_func(np.arange(1000.0))             # Compile first so diagnostics are available
parallel_func.parallel_diagnostics(level=4)
Benchmark JIT Functions
import time
import numpy as np

def benchmark_numba(func, data, n_warmup=3, n_runs=10):
    for _ in range(n_warmup):
        func(data)  # Warm-up runs absorb JIT compilation cost
    times = []
    for _ in range(n_runs):
        start = time.perf_counter()
        func(data)
        times.append(time.perf_counter() - start)
    return np.mean(times), np.std(times)
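A quick usage sketch, reusing the jitted my_func from above on an example array:
arr = np.random.rand(1_000_000)
mean_t, std_t = benchmark_numba(my_func, arr)
print(f"Mean: {mean_t * 1e3:.3f} ms ± {std_t * 1e3:.3f} ms")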
Quick Decorator
import functools
import time

def profile_time(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        print(f"{func.__name__}: {time.perf_counter() - start:.4f}s")
        return result
    return wrapper
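A usage sketch with a hypothetical function:
@profile_time
def slow_sum(n):
    return sum(i * i for i in range(n))

slow_sum(1_000_000)  # Prints the elapsed time labeled 'slow_sum'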