---
name: efficiency-metrics
description: Calculate and analyze warehouse picking efficiency metrics. Use this skill when measuring picker productivity, analyzing travel vs. pick time ratios, benchmarking performance, or optimizing operational KPIs.
---
# Efficiency Metrics

Calculate and analyze warehouse picking efficiency metrics and KPIs.
## Installation

```bash
pip install numpy pandas matplotlib
```
## Quick Start

```python
# Sample picking data: one record per order picked by a picker
picks = [
    {'picker': 'P001', 'order': 'ORD001', 'items': 5, 'time_sec': 180},
    {'picker': 'P001', 'order': 'ORD002', 'items': 3, 'time_sec': 120},
    {'picker': 'P002', 'order': 'ORD003', 'items': 8, 'time_sec': 300}
]

# Units (items) picked per labor hour across all pickers
total_items = sum(p['items'] for p in picks)
total_time_hours = sum(p['time_sec'] for p in picks) / 3600
units_per_hour = total_items / total_time_hours
print(f"Units per hour: {units_per_hour:.1f}")  # 96.0
```
## Core Productivity Metrics

```python
import pandas as pd


class PickerProductivity:
    """Productivity KPIs computed from per-pick records.

    Expects records with 'picker', 'order', 'quantity', and 'time_sec' fields.
    """

    def __init__(self, pick_records):
        self.records = pd.DataFrame(pick_records)

    def _for_picker(self, picker_id):
        """Return all records, or only those for a single picker."""
        if picker_id is None:
            return self.records
        return self.records[self.records['picker'] == picker_id]

    def picks_per_hour(self, picker_id=None):
        """Calculate picks (order lines) per hour."""
        data = self._for_picker(picker_id)
        total_picks = len(data)
        total_hours = data['time_sec'].sum() / 3600
        return total_picks / total_hours if total_hours > 0 else 0

    def units_per_hour(self, picker_id=None):
        """Calculate units picked per hour (uses the 'quantity' column)."""
        data = self._for_picker(picker_id)
        total_units = data['quantity'].sum()
        total_hours = data['time_sec'].sum() / 3600
        return total_units / total_hours if total_hours > 0 else 0

    def orders_per_hour(self, picker_id=None):
        """Calculate orders completed per hour."""
        data = self._for_picker(picker_id)
        unique_orders = data['order'].nunique()
        total_hours = data['time_sec'].sum() / 3600
        return unique_orders / total_hours if total_hours > 0 else 0

    def average_pick_time(self, picker_id=None):
        """Calculate average time per pick in seconds."""
        data = self._for_picker(picker_id)
        return data['time_sec'].mean()
```
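A quick usage sketch, assuming the class above is in scope; the sample records and their values are illustrative:

```python
# Hypothetical pick records; 'quantity' holds units per pick line
records = [
    {'picker': 'P001', 'order': 'ORD001', 'quantity': 5, 'time_sec': 180},
    {'picker': 'P001', 'order': 'ORD002', 'quantity': 3, 'time_sec': 120},
    {'picker': 'P002', 'order': 'ORD003', 'quantity': 8, 'time_sec': 300},
]

prod = PickerProductivity(records)
print(prod.picks_per_hour())        # 3 lines / (600 s / 3600) = 18.0
print(prod.units_per_hour('P001'))  # 8 units / (300 s / 3600) = 96.0
print(prod.average_pick_time())     # 200.0 seconds per line
```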
## Travel Efficiency Metrics

```python
import numpy as np


def calculate_travel_ratio(picks_with_travel):
    """
    Calculate the ratio of travel time to total time.

    Lower is better - it indicates less time spent walking.
    Each record needs 'travel_time_sec' and 'total_time_sec' keys.
    """
    total_travel = sum(p['travel_time_sec'] for p in picks_with_travel)
    total_time = sum(p['total_time_sec'] for p in picks_with_travel)
    return {
        'travel_ratio': total_travel / total_time if total_time > 0 else 0,
        'pick_ratio': 1 - (total_travel / total_time) if total_time > 0 else 0,
        'total_travel_minutes': total_travel / 60,
        'total_pick_minutes': (total_time - total_travel) / 60
    }


def calculate_distance_metrics(route_data):
    """Calculate distance-based efficiency metrics from per-route records."""
    total_distance = sum(r['distance'] for r in route_data)
    total_picks = sum(r['num_picks'] for r in route_data)
    return {
        'total_distance_meters': total_distance,
        'distance_per_pick': total_distance / total_picks if total_picks > 0 else 0,
        'avg_route_distance': np.mean([r['distance'] for r in route_data]),
        'max_route_distance': max(r['distance'] for r in route_data),
        'min_route_distance': min(r['distance'] for r in route_data)
    }


def aisle_coverage_efficiency(route_data, warehouse_aisles):
    """Measure how efficiently routes cover aisles.

    `warehouse_aisles` is the total number of aisles in the warehouse.
    """
    metrics = []
    for route in route_data:
        aisles_visited = len(set(p['aisle'] for p in route['picks']))
        picks_per_aisle = len(route['picks']) / aisles_visited
        metrics.append({
            'route_id': route['id'],
            'aisles_visited': aisles_visited,
            'picks_per_aisle': picks_per_aisle,
            'aisle_utilization': aisles_visited / warehouse_aisles
        })
    return metrics
```
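For orientation, a small example of `calculate_travel_ratio` with made-up timing data; the keys match what the function above reads:

```python
# Illustrative picks with timing split into travel time and total time
picks_with_travel = [
    {'travel_time_sec': 90, 'total_time_sec': 180},
    {'travel_time_sec': 60, 'total_time_sec': 120},
]

result = calculate_travel_ratio(picks_with_travel)
print(result['travel_ratio'])        # 150 / 300 = 0.5
print(result['total_pick_minutes'])  # (300 - 150) / 60 = 2.5
```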
## Order Accuracy Metrics

```python
def calculate_accuracy_metrics(picks, errors):
    """Calculate picking accuracy metrics from pick and error records."""
    total_picks = len(picks)
    error_count = len(errors)
    if total_picks == 0:
        return {'total_picks': 0, 'total_errors': error_count,
                'accuracy_rate': 0, 'error_rate': 0,
                'errors_by_type': {}, 'dpmo': 0}

    # Categorize errors by type (e.g. wrong item, wrong quantity)
    error_types = {}
    for error in errors:
        err_type = error['type']
        error_types[err_type] = error_types.get(err_type, 0) + 1

    return {
        'total_picks': total_picks,
        'total_errors': error_count,
        'accuracy_rate': (total_picks - error_count) / total_picks * 100,
        'error_rate': error_count / total_picks * 100,
        'errors_by_type': error_types,
        'dpmo': error_count / total_picks * 1_000_000  # Defects per million opportunities
    }


def calculate_perfect_order_rate(orders):
    """Calculate the percentage of orders shipped without any issues."""
    perfect_orders = sum(1 for o in orders if o['is_perfect'])
    return perfect_orders / len(orders) * 100 if orders else 0
```
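A short example with invented pick and error records; the error `type` values are illustrative:

```python
picks = [{'order': f'ORD{i:03d}'} for i in range(1, 501)]  # 500 pick lines
errors = [
    {'type': 'wrong_item'},
    {'type': 'wrong_quantity'},
    {'type': 'wrong_item'},
]

acc = calculate_accuracy_metrics(picks, errors)
print(acc['accuracy_rate'])   # (500 - 3) / 500 * 100 = 99.4
print(acc['dpmo'])            # 3 / 500 * 1_000_000 = 6000.0
print(acc['errors_by_type'])  # {'wrong_item': 2, 'wrong_quantity': 1}
```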
## Batch Efficiency Metrics

```python
import numpy as np


def analyze_batch_efficiency(batches):
    """Analyze the efficiency of order batching."""
    metrics = []
    for batch in batches:
        orders_in_batch = len(batch['orders'])
        total_items = sum(o['items'] for o in batch['orders'])
        distance = batch['route_distance']
        metrics.append({
            'batch_id': batch['id'],
            'orders': orders_in_batch,
            'items': total_items,
            'distance': distance,
            'distance_per_order': distance / orders_in_batch,
            'distance_per_item': distance / total_items,
            'items_per_order': total_items / orders_in_batch
        })
    return {
        'batch_metrics': metrics,
        'avg_orders_per_batch': np.mean([m['orders'] for m in metrics]),
        'avg_items_per_batch': np.mean([m['items'] for m in metrics]),
        'avg_distance_per_order': np.mean([m['distance_per_order'] for m in metrics]),
        'avg_distance_per_item': np.mean([m['distance_per_item'] for m in metrics])
    }
```
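A sketch with two hypothetical batches; `route_distance` is in whatever distance unit your route data uses (meters here):

```python
batches = [
    {'id': 'B1', 'orders': [{'items': 4}, {'items': 6}], 'route_distance': 250},
    {'id': 'B2', 'orders': [{'items': 3}, {'items': 5}, {'items': 2}], 'route_distance': 300},
]

summary = analyze_batch_efficiency(batches)
print(summary['avg_orders_per_batch'])    # (2 + 3) / 2 = 2.5
print(summary['avg_distance_per_order'])  # (125.0 + 100.0) / 2 = 112.5
```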
## Time-Based Metrics

```python
import numpy as np


def calculate_time_metrics(pick_sessions):
    """Calculate time-based efficiency metrics per picking session."""
    metrics = []
    for session in pick_sessions:
        # Session duration from login/logout datetimes
        login_time = session['login_time']
        logout_time = session['logout_time']
        total_duration = (logout_time - login_time).total_seconds() / 3600
        productive_time = session['picking_time_hours']
        idle_time = total_duration - productive_time
        metrics.append({
            'session_id': session['id'],
            'picker': session['picker'],
            'total_hours': total_duration,
            'productive_hours': productive_time,
            'idle_hours': idle_time,
            'utilization_rate': productive_time / total_duration * 100
        })
    return {
        'session_metrics': metrics,
        'avg_utilization': np.mean([m['utilization_rate'] for m in metrics]),
        'total_productive_hours': sum(m['productive_hours'] for m in metrics),
        'total_idle_hours': sum(m['idle_hours'] for m in metrics)
    }
```
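A minimal example using datetime login/logout stamps; the session fields mirror what `calculate_time_metrics` reads, and the values are invented:

```python
from datetime import datetime

pick_sessions = [
    {
        'id': 'S1',
        'picker': 'P001',
        'login_time': datetime(2024, 1, 15, 8, 0),
        'logout_time': datetime(2024, 1, 15, 16, 0),
        'picking_time_hours': 6.5,
    },
]

time_report = calculate_time_metrics(pick_sessions)
print(time_report['avg_utilization'])   # 6.5 / 8 * 100 = 81.25
print(time_report['total_idle_hours'])  # 8 - 6.5 = 1.5
```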
## Benchmarking

```python
class PerformanceBenchmark:
    def __init__(self):
        self.benchmarks = {
            'picks_per_hour': {'good': 100, 'excellent': 150, 'world_class': 200},
            'accuracy_rate': {'good': 99.0, 'excellent': 99.5, 'world_class': 99.9},
            'travel_ratio': {'good': 0.50, 'excellent': 0.40, 'world_class': 0.30},
            'utilization_rate': {'good': 75, 'excellent': 85, 'world_class': 95}
        }

    def evaluate(self, metric_name, value):
        """Evaluate a metric against benchmarks."""
        if metric_name not in self.benchmarks:
            return 'unknown'
        thresholds = self.benchmarks[metric_name]

        # For metrics where lower is better
        if metric_name == 'travel_ratio':
            if value <= thresholds['world_class']:
                return 'world_class'
            elif value <= thresholds['excellent']:
                return 'excellent'
            elif value <= thresholds['good']:
                return 'good'
            return 'needs_improvement'

        # For metrics where higher is better
        if value >= thresholds['world_class']:
            return 'world_class'
        elif value >= thresholds['excellent']:
            return 'excellent'
        elif value >= thresholds['good']:
            return 'good'
        return 'needs_improvement'

    def generate_scorecard(self, metrics):
        """Generate a performance scorecard for a dict of metric values."""
        scorecard = {}
        for metric_name, value in metrics.items():
            scorecard[metric_name] = {
                'value': value,
                'rating': self.evaluate(metric_name, value),
                'benchmarks': self.benchmarks.get(metric_name, {})
            }
        return scorecard
```
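An example scorecard for a set of measured values; the numbers are illustrative:

```python
benchmark = PerformanceBenchmark()
scorecard = benchmark.generate_scorecard({
    'picks_per_hour': 132,    # between 'good' (100) and 'excellent' (150)
    'accuracy_rate': 99.6,
    'travel_ratio': 0.45,     # lower is better
    'utilization_rate': 88,
})

for name, entry in scorecard.items():
    print(name, entry['value'], entry['rating'])
# picks_per_hour 132 good
# accuracy_rate 99.6 excellent
# travel_ratio 0.45 good
# utilization_rate 88 excellent
```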
## Dashboard Metrics

```python
def generate_daily_dashboard(date, pick_data, order_data):
    """Generate daily operations dashboard metrics.

    The calculate_* helpers referenced below are assumed to be defined
    elsewhere, e.g. as thin wrappers around the metric functions above.
    """
    return {
        'date': date,
        'summary': {
            'total_orders': len(order_data),
            'total_picks': len(pick_data),
            'total_units': sum(p['quantity'] for p in pick_data)
        },
        'productivity': {
            'picks_per_hour': calculate_pph(pick_data),
            'units_per_hour': calculate_uph(pick_data),
            'orders_per_hour': calculate_oph(order_data)
        },
        'quality': {
            'accuracy_rate': calculate_accuracy(pick_data),
            'perfect_order_rate': calculate_por(order_data)
        },
        'efficiency': {
            'travel_ratio': calculate_travel_ratio(pick_data),
            'utilization_rate': calculate_utilization(pick_data)
        }
    }
```
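A minimal sketch of what a few of those helpers might look like, assuming pick records carry `quantity`, `time_sec`, and `is_error` fields and order records carry `time_sec` and `is_perfect` (all of these field names are assumptions, not part of the skill). The travel-ratio and utilization helpers need the richer timing data described in the Travel Efficiency and Time-Based Metrics sections and are not sketched here.

```python
# Hypothetical wrappers for generate_daily_dashboard.
# Field names ('quantity', 'time_sec', 'is_error', 'is_perfect') are assumptions.

def calculate_pph(pick_data):
    # Pick lines per labor hour
    hours = sum(p['time_sec'] for p in pick_data) / 3600
    return len(pick_data) / hours if hours > 0 else 0

def calculate_uph(pick_data):
    # Units per labor hour
    hours = sum(p['time_sec'] for p in pick_data) / 3600
    return sum(p['quantity'] for p in pick_data) / hours if hours > 0 else 0

def calculate_oph(order_data):
    # Orders per labor hour
    hours = sum(o['time_sec'] for o in order_data) / 3600
    return len(order_data) / hours if hours > 0 else 0

def calculate_accuracy(pick_data):
    # Percentage of pick lines flagged error-free
    errors = sum(1 for p in pick_data if p.get('is_error'))
    return (len(pick_data) - errors) / len(pick_data) * 100 if pick_data else 0

def calculate_por(order_data):
    # Reuses calculate_perfect_order_rate defined above
    return calculate_perfect_order_rate(order_data)
```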