Claude Code Plugins

Community-maintained marketplace


Perform ABC analysis for inventory classification. Use this skill when categorizing SKUs by value or velocity, applying Pareto principles, segmenting inventory for differential treatment, or prioritizing stock management efforts.

Install Skill

1. Download the skill.

2. Enable skills in Claude: open claude.ai/settings/capabilities and find the "Skills" section.

3. Upload to Claude: click "Upload skill" and select the downloaded ZIP file.

Note: Please verify the skill by reviewing its instructions before using it.

SKILL.md

name: abc-analysis
description: Perform ABC analysis for inventory classification. Use this skill when categorizing SKUs by value or velocity, applying Pareto principles, segmenting inventory for differential treatment, or prioritizing stock management efforts.

ABC Analysis

Classify inventory using ABC analysis based on value, velocity, or other metrics.

Installation

pip install numpy pandas

Quick Start

import pandas as pd
import numpy as np

def abc_classification(items, value_field='annual_value'):
    """
    Classify items into A, B, C categories using Pareto principle.
    A: Top 20% contributing ~80% of value
    B: Next 30% contributing ~15% of value
    C: Bottom 50% contributing ~5% of value
    """
    df = pd.DataFrame(items)
    df = df.sort_values(value_field, ascending=False)

    # Running share of total value, accumulated from the highest-value item down
    df['cumulative_value'] = df[value_field].cumsum()
    df['cumulative_pct'] = df['cumulative_value'] / df[value_field].sum()

    # Default to C, then promote by cumulative value share: A up to 80%, B up to 95%
    df['class'] = 'C'
    df.loc[df['cumulative_pct'] <= 0.80, 'class'] = 'A'
    df.loc[(df['cumulative_pct'] > 0.80) & (df['cumulative_pct'] <= 0.95), 'class'] = 'B'

    return df
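
A minimal usage sketch (the SKU IDs and annual values below are illustrative):

items = [
    {'sku_id': 'SKU-001', 'annual_value': 120000},
    {'sku_id': 'SKU-002', 'annual_value': 45000},
    {'sku_id': 'SKU-003', 'annual_value': 8000},
    {'sku_id': 'SKU-004', 'annual_value': 1500},
]

classified = abc_classification(items)
# Each row now carries its ABC class alongside the original fields
print(classified[['sku_id', 'annual_value', 'class']])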

Common Patterns

Multi-Criteria ABC Analysis

def multi_criteria_abc(items, criteria_weights):
    """
    ABC analysis using multiple weighted criteria.
    criteria_weights: {'revenue': 0.5, 'picks': 0.3, 'margin': 0.2}
    """
    df = pd.DataFrame(items)

    # Normalize each criterion to [0, 1] by its column maximum
    for criterion in criteria_weights:
        max_val = df[criterion].max()
        if max_val > 0:
            df[f'{criterion}_norm'] = df[criterion] / max_val
        else:
            # Guard against an all-zero criterion so the _norm column always exists
            df[f'{criterion}_norm'] = 0.0

    # Calculate weighted score
    df['composite_score'] = sum(
        df[f'{criterion}_norm'] * weight
        for criterion, weight in criteria_weights.items()
    )

    # Sort and classify
    df = df.sort_values('composite_score', ascending=False)
    df['cumulative_pct'] = df['composite_score'].cumsum() / df['composite_score'].sum()

    df['class'] = pd.cut(
        df['cumulative_pct'],
        bins=[0, 0.80, 0.95, 1.0],
        labels=['A', 'B', 'C']
    )

    return df
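
A usage sketch, assuming each item record carries numeric fields matching the criteria names (revenue, picks, and margin here are illustrative):

items = [
    {'sku_id': 'SKU-001', 'revenue': 90000, 'picks': 1200, 'margin': 0.35},
    {'sku_id': 'SKU-002', 'revenue': 30000, 'picks': 4000, 'margin': 0.20},
    {'sku_id': 'SKU-003', 'revenue': 5000,  'picks': 150,  'margin': 0.50},
]

weights = {'revenue': 0.5, 'picks': 0.3, 'margin': 0.2}
ranked = multi_criteria_abc(items, weights)
# Items are ranked by the weighted, normalized score rather than a single metric
print(ranked[['sku_id', 'composite_score', 'class']])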

ABC-XYZ Matrix

def abc_xyz_analysis(items):
    """
    Combine ABC (value) and XYZ (demand variability) analysis.
    """
    df = pd.DataFrame(items)

    # ABC classification by value
    df = df.sort_values('annual_value', ascending=False)
    df['value_cum_pct'] = df['annual_value'].cumsum() / df['annual_value'].sum()
    df['abc'] = pd.cut(df['value_cum_pct'], bins=[0, 0.8, 0.95, 1], labels=['A', 'B', 'C'])

    # XYZ classification by coefficient of variation of demand
    # (items with zero mean demand get cv = inf and land in Z)
    df['cv'] = df['demand_std'] / df['demand_mean']
    df['xyz'] = pd.cut(
        df['cv'],
        bins=[-np.inf, 0.5, 1.0, np.inf],
        labels=['X', 'Y', 'Z']
    )

    # Combined classification
    df['abc_xyz'] = df['abc'].astype(str) + df['xyz'].astype(str)

    return df
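
An illustrative run, assuming each item provides annual_value plus demand_mean and demand_std over the same horizon:

items = [
    {'sku_id': 'SKU-001', 'annual_value': 120000, 'demand_mean': 500, 'demand_std': 100},
    {'sku_id': 'SKU-002', 'annual_value': 40000,  'demand_mean': 200, 'demand_std': 180},
    {'sku_id': 'SKU-003', 'annual_value': 15000,  'demand_mean': 40,  'demand_std': 90},
]

matrix = abc_xyz_analysis(items)
# AX = high value, stable demand; CZ = low value, erratic demand
print(matrix[['sku_id', 'abc', 'xyz', 'abc_xyz']])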

Velocity-Based ABC

def velocity_abc(items, pick_history, period_days=365):
    """
    Classify items by pick velocity (picks per period).
    """
    # Calculate velocity from pick history
    velocity = {}
    for pick in pick_history:
        sku = pick['sku_id']
        velocity[sku] = velocity.get(sku, 0) + pick['quantity']

    df = pd.DataFrame(items)
    df['velocity'] = df['sku_id'].map(velocity).fillna(0)
    df['daily_velocity'] = df['velocity'] / period_days

    # Classify by velocity
    df = df.sort_values('velocity', ascending=False)
    df['velocity_cum_pct'] = df['velocity'].cumsum() / df['velocity'].sum()

    df['velocity_class'] = 'C'
    df.loc[df['velocity_cum_pct'] <= 0.80, 'velocity_class'] = 'A'
    df.loc[(df['velocity_cum_pct'] > 0.80) & (df['velocity_cum_pct'] <= 0.95), 'velocity_class'] = 'B'

    return df
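
A usage sketch with hypothetical pick records (each pick carries sku_id and quantity, as the function expects):

items = [{'sku_id': 'SKU-001'}, {'sku_id': 'SKU-002'}, {'sku_id': 'SKU-003'}]
pick_history = [
    {'sku_id': 'SKU-001', 'quantity': 10},
    {'sku_id': 'SKU-001', 'quantity': 5},
    {'sku_id': 'SKU-002', 'quantity': 6},
    {'sku_id': 'SKU-003', 'quantity': 2},
]

ranked = velocity_abc(items, pick_history, period_days=365)
# Velocity is the summed pick quantity; SKUs with no picks default to zero
print(ranked[['sku_id', 'velocity', 'daily_velocity', 'velocity_class']])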

ABC Threshold Optimization

def optimize_abc_thresholds(items, a_locations, b_locations, c_locations):
    """
    Optimize ABC thresholds based on available location capacity.
    C capacity (c_locations) implicitly absorbs everything below the B cutoff.
    """
    df = pd.DataFrame(items).sort_values('value', ascending=False)
    total_items = len(df)

    # Class sizes follow location capacity: as many A items as A slots, etc.
    a_count = min(len(a_locations), total_items)
    b_count = min(len(b_locations), total_items - a_count)

    # Threshold = value of the last item that still fits in the class;
    # with zero capacity the threshold is unreachable, so nothing is promoted
    a_threshold = df['value'].iloc[a_count - 1] if a_count > 0 else float('inf')
    b_threshold = df['value'].iloc[a_count + b_count - 1] if b_count > 0 else a_threshold

    df['class'] = 'C'
    df.loc[df['value'] >= a_threshold, 'class'] = 'A'
    df.loc[(df['value'] < a_threshold) & (df['value'] >= b_threshold), 'class'] = 'B'

    return df, {'A': a_threshold, 'B': b_threshold}
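
A capacity-driven sketch: the location lists below are hypothetical slot IDs whose lengths determine how many SKUs each class can hold.

items = [{'sku_id': f'SKU-{i:03d}', 'value': v}
         for i, v in enumerate([90000, 40000, 20000, 9000, 4000, 1000])]

a_locations = ['A-01']                      # one golden-zone slot
b_locations = ['B-01', 'B-02']              # two mid-tier slots
c_locations = ['C-01', 'C-02', 'C-03']      # remaining slots

classified, thresholds = optimize_abc_thresholds(items, a_locations, b_locations, c_locations)
print(thresholds)                           # value cutoffs implied by capacity
print(classified['class'].value_counts())   # 1 A, 2 B, 3 C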

ABC Class Summary

def abc_summary_report(classified_df):
    """
    Generate summary statistics by ABC class.
    """
    summary = classified_df.groupby('class').agg({
        'sku_id': 'count',
        'annual_value': ['sum', 'mean'],
        'velocity': ['sum', 'mean']
    }).round(2)

    summary.columns = ['sku_count', 'total_value', 'avg_value',
                       'total_velocity', 'avg_velocity']

    # Calculate percentages
    totals = summary.sum()
    summary['value_pct'] = (summary['total_value'] / totals['total_value'] * 100).round(1)
    summary['sku_pct'] = (summary['sku_count'] / totals['sku_count'] * 100).round(1)
    summary['velocity_pct'] = (summary['total_velocity'] / totals['total_velocity'] * 100).round(1)

    return summary
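
A usage sketch chaining the Quick Start classifier into the report; the sample items carry both annual_value and velocity so every aggregated column exists:

items = [
    {'sku_id': 'SKU-001', 'annual_value': 120000, 'velocity': 1500},
    {'sku_id': 'SKU-002', 'annual_value': 40000,  'velocity': 600},
    {'sku_id': 'SKU-003', 'annual_value': 15000,  'velocity': 90},
    {'sku_id': 'SKU-004', 'annual_value': 2000,   'velocity': 20},
]

classified = abc_classification(items)   # adds the 'class' column
print(abc_summary_report(classified))    # one row per class with counts, values, and shares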

Rolling ABC Analysis

def rolling_abc_analysis(items, pick_history, window_days=90):
    """
    Perform ABC analysis on rolling time window for dynamic classification.
    """
    from datetime import datetime, timedelta

    df = pd.DataFrame(items)
    picks_df = pd.DataFrame(pick_history)
    picks_df['date'] = pd.to_datetime(picks_df['date'])

    cutoff = datetime.now() - timedelta(days=window_days)
    recent_picks = picks_df[picks_df['date'] >= cutoff]

    # Calculate recent velocity
    recent_velocity = recent_picks.groupby('sku_id')['quantity'].sum()
    df['recent_velocity'] = df['sku_id'].map(recent_velocity).fillna(0)

    # Classify
    df = df.sort_values('recent_velocity', ascending=False)
    df['cum_pct'] = df['recent_velocity'].cumsum() / df['recent_velocity'].sum()

    df['current_class'] = pd.cut(
        df['cum_pct'],
        bins=[0, 0.80, 0.95, 1.0],
        labels=['A', 'B', 'C']
    )

    # Compare to previous classification
    if 'previous_class' in df.columns:
        df['class_change'] = df['current_class'] != df['previous_class']

    return df
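
A sketch of a rolling re-classification, using hypothetical dated picks and a previous_class field so the class-change flag is populated:

from datetime import datetime, timedelta

items = [
    {'sku_id': 'SKU-001', 'previous_class': 'A'},
    {'sku_id': 'SKU-002', 'previous_class': 'B'},
    {'sku_id': 'SKU-003', 'previous_class': 'C'},
]

today = datetime.now()
pick_history = [
    {'sku_id': 'SKU-002', 'quantity': 40, 'date': today - timedelta(days=10)},
    {'sku_id': 'SKU-002', 'quantity': 35, 'date': today - timedelta(days=30)},
    {'sku_id': 'SKU-001', 'quantity': 20, 'date': today - timedelta(days=5)},
    {'sku_id': 'SKU-001', 'quantity': 30, 'date': today - timedelta(days=200)},  # outside the 90-day window
    {'sku_id': 'SKU-003', 'quantity': 5,  'date': today - timedelta(days=15)},
]

result = rolling_abc_analysis(items, pick_history, window_days=90)
# SKU-002 overtakes SKU-001 inside the window, so both show class_change = True
print(result[['sku_id', 'recent_velocity', 'current_class', 'class_change']])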