| name | SQL Ecosystem |
| description | This skill should be used when working with SQL databases, "SELECT", "INSERT", "UPDATE", "DELETE", "CREATE TABLE", "JOIN", "INDEX", "EXPLAIN", transactions, or database migrations. Provides comprehensive SQL patterns across PostgreSQL, MySQL, and SQLite. |
-- String types CHAR(n), VARCHAR(n), TEXT
-- Date/Time types DATE, TIME, TIMESTAMP, INTERVAL
-- Boolean BOOLEAN
-- MySQL specific TINYINT, MEDIUMINT, ENUM('value1', 'value2'), SET('a', 'b', 'c'), JSON (stored as text internally)
-- SQLite specific -- Uses type affinity: TEXT, INTEGER, REAL, BLOB, NULL -- Any type name accepted but mapped to affinity
CONSTRAINT email_format CHECK (email ~* '^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$')
);
CREATE TABLE orders ( id SERIAL PRIMARY KEY, user_id INTEGER NOT NULL, total DECIMAL(10, 2) NOT NULL, status VARCHAR(20) DEFAULT 'pending',
CONSTRAINT fk_user FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
-- Add column with default (PostgreSQL 11+ is instant) ALTER TABLE users ADD COLUMN active BOOLEAN DEFAULT true;
-- Rename column ALTER TABLE users RENAME COLUMN name TO full_name;
-- Add constraint ALTER TABLE users ADD CONSTRAINT unique_phone UNIQUE (phone);
-- Drop constraint ALTER TABLE users DROP CONSTRAINT unique_phone;
-- Unique index CREATE UNIQUE INDEX idx_users_email_unique ON users(email);
-- Composite index (order matters for query optimization) CREATE INDEX idx_orders_user_status ON orders(user_id, status);
-- Partial index (PostgreSQL) CREATE INDEX idx_active_users ON users(email) WHERE active = true;
-- Expression index (PostgreSQL) CREATE INDEX idx_users_lower_email ON users(LOWER(email));
-- Aggregate with grouping
-- COUNT(*) counts rows per group (COUNT() with no argument is a syntax error);
-- WHERE filters rows before grouping, HAVING filters the aggregated groups after.
SELECT
    status,
    COUNT(*) AS count,
    SUM(total) AS revenue
FROM orders
WHERE created_at >= '2024-01-01'
GROUP BY status
HAVING COUNT(*) > 10
ORDER BY revenue DESC;
-- Multi-row insert INSERT INTO users (email, name) VALUES ('user1@example.com', 'User One'), ('user2@example.com', 'User Two'), ('user3@example.com', 'User Three');
-- Insert with returning (PostgreSQL) INSERT INTO users (email, name) VALUES ('new@example.com', 'New User') RETURNING id, created_at;
-- Upsert (PostgreSQL) INSERT INTO users (email, name) VALUES ('user@example.com', 'Updated Name') ON CONFLICT (email) DO UPDATE SET name = EXCLUDED.name;
-- Upsert (MySQL) INSERT INTO users (email, name) VALUES ('user@example.com', 'Updated Name') ON DUPLICATE KEY UPDATE name = VALUES(name);
-- NOTE(review): VALUES() inside ON DUPLICATE KEY UPDATE is deprecated since MySQL 8.0.20.
-- On 8.0.19+ prefer the row-alias form:
--   INSERT INTO users (email, name) VALUES ('user@example.com', 'Updated Name') AS new
--   ON DUPLICATE KEY UPDATE name = new.name;
-- Update with subquery UPDATE orders SET status = 'cancelled' WHERE user_id IN (SELECT id FROM users WHERE active = false);
-- Update with join (PostgreSQL) UPDATE orders o SET status = 'vip' FROM users u WHERE o.user_id = u.id AND u.vip = true;
-- Update with returning (PostgreSQL) UPDATE users SET active = false WHERE id = 1 RETURNING *;
-- Delete with subquery DELETE FROM orders WHERE user_id IN (SELECT id FROM users WHERE active = false);
-- Truncate (faster for all rows, resets sequences) TRUNCATE TABLE logs; TRUNCATE TABLE logs RESTART IDENTITY; -- PostgreSQL
-- Soft delete pattern (prefer this) UPDATE users SET deleted_at = NOW() WHERE id = 1;
-- PostgreSQL with pg (Node.js) client.query( 'SELECT * FROM users WHERE email = $1 AND status = $2', [userEmail, status] )
-- MySQL with mysql-connector (Python) cursor.execute( "SELECT * FROM users WHERE email = %s AND status = %s", (user_email, status) )
-- MySQL with mysql2 (Node.js) connection.execute( 'SELECT * FROM users WHERE email = ? AND status = ?', [userEmail, status] )
-- SQLite with sqlite3 (Python) cursor.execute( "SELECT * FROM users WHERE email = ? AND status = ?", (user_email, status) )
-- Go with database/sql (parameterized with $1/$2 placeholders, PostgreSQL driver style)
-- db.Query( "SELECT * FROM users WHERE email = $1 AND status = $2", userEmail, status )
-- If user_input = "'; DROP TABLE users; --" -- Executes: SELECT * FROM users WHERE email = ''; DROP TABLE users; --'
-- SAFE: Escape wildcards before using in LIKE -- Python: escaped = user_input.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_') -- Then use parameterized query: cursor.execute( "SELECT * FROM products WHERE name LIKE %s", ('%' + escaped + '%',) )
-- PostgreSQL: Use ESCAPE clause explicitly so backslash-escaped %/_ in $1 are treated
-- literally ($1 must have its wildcards pre-escaped with \ as shown above;
-- ESCAPE '' would disable escaping entirely and leave user wildcards active)
SELECT * FROM products WHERE name LIKE '%' || $1 || '%' ESCAPE '\';
-- Alternative: Use position() or strpos() for exact matching SELECT * FROM products WHERE position($1 in name) > 0;
-- SAFE: Whitelist allowed values (Python example) ALLOWED_COLUMNS = {'id', 'name', 'email', 'created_at'} ALLOWED_TABLES = {'users', 'products', 'orders'}
if column_name not in ALLOWED_COLUMNS: raise ValueError(f"Invalid column: {column_name}") if table_name not in ALLOWED_TABLES: raise ValueError(f"Invalid table: {table_name}")
-- PostgreSQL: quote identifiers with format('%I') when building dynamic SQL.
-- (quote_ident() returns text, so it cannot appear directly as a table in FROM;
-- build the statement text safely and run it with EXECUTE in PL/pgSQL.)
-- Inside a PL/pgSQL function: EXECUTE format('SELECT %I FROM %I', $1, $2);
SELECT format('SELECT %I FROM %I', $1, $2);
-- Use identifier quoting as additional protection query = f'SELECT "{column_name}" FROM "{table_name}"'
-- Foreign Key FOREIGN KEY (user_id) REFERENCES users(id) FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL
-- Unique UNIQUE (email) UNIQUE (user_id, product_id) -- composite unique
-- Check CHECK (price > 0) CHECK (status IN ('pending', 'active', 'completed'))
-- Not Null NOT NULL
-- Default DEFAULT CURRENT_TIMESTAMP DEFAULT 'pending'
UNION ALL
-- Recursive case: subordinates
SELECT e.id, e.name, e.manager_id, ot.level + 1
FROM employees e
INNER JOIN org_tree ot ON e.manager_id = ot.id
) SELECT * FROM org_tree ORDER BY level, name;
-- Partition by user SELECT user_id, total, ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY created_at DESC) as order_num FROM orders;
-- 1NF compliant: separate rows CREATE TABLE order_items ( order_id INTEGER, product_id INTEGER, PRIMARY KEY (order_id, product_id) );
-- 2NF compliant: separate product table CREATE TABLE products ( id INTEGER PRIMARY KEY, name TEXT ); CREATE TABLE order_items ( order_id INTEGER, product_id INTEGER REFERENCES products(id), quantity INTEGER, PRIMARY KEY (order_id, product_id) );
-- 3NF compliant: separate locations CREATE TABLE locations ( zip_code TEXT PRIMARY KEY, city TEXT ); CREATE TABLE users ( id INTEGER PRIMARY KEY, name TEXT, zip_code TEXT REFERENCES locations(zip_code) );
-- NOTE(review): PostgreSQL does not support a WHERE clause on a UNIQUE table
-- constraint — this line raises a syntax error. Use a partial unique index instead:
--   CREATE UNIQUE INDEX unique_active_email ON users(email) WHERE deleted_at IS NULL;
CONSTRAINT unique_active_email UNIQUE (email) WHERE deleted_at IS NULL
);
-- Query active records SELECT * FROM users WHERE deleted_at IS NULL;
-- Auto-update trigger (PostgreSQL) CREATE OR REPLACE FUNCTION update_updated_at() RETURNS TRIGGER AS $$ BEGIN NEW.updated_at = CURRENT_TIMESTAMP; RETURN NEW; END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER orders_updated_at BEFORE UPDATE ON orders FOR EACH ROW EXECUTE FUNCTION update_updated_at();
INDEX idx_commentable (commentable_type, commentable_id)
);
INSERT INTO order_statuses (name) VALUES ('pending'), ('processing'), ('shipped'), ('delivered'), ('cancelled');
CREATE TABLE orders ( id SERIAL PRIMARY KEY, status_id INTEGER REFERENCES order_statuses(id) );
CREATE TABLE user_roles ( user_id INTEGER REFERENCES users(id) ON DELETE CASCADE, role_id INTEGER REFERENCES roles(id) ON DELETE CASCADE, granted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY (user_id, role_id) );
-- MySQL EXPLAIN SELECT * FROM users WHERE email = 'test@example.com'; EXPLAIN ANALYZE SELECT * FROM users WHERE email = 'test@example.com'; -- MySQL 8.0.18+
-- SQLite EXPLAIN QUERY PLAN SELECT * FROM users WHERE email = 'test@example.com';
-- Key metrics to watch cost=startup..total -- Estimated cost units rows=N -- Estimated row count actual time=X..Y -- Real execution time (with ANALYZE) loops=N -- Number of iterations
-- Covering index avoids table lookup CREATE INDEX idx_users_email_name ON users(email, name);
-- Leftmost prefix rule: this index supports: -- WHERE user_id = ? -- WHERE user_id = ? AND status = ? -- But NOT: WHERE status = ?
-- Only index recent orders CREATE INDEX idx_recent_orders ON orders(created_at) WHERE created_at > '2024-01-01';
-- Good: only needed columns SELECT id, name, email FROM users WHERE id = 1;
-- Good: stops at first match SELECT EXISTS(SELECT 1 FROM orders WHERE user_id = 1);
-- Good: batch insert INSERT INTO logs (message) VALUES ('log1'), ('log2'), ('log3');
-- Good: batch update with CASE UPDATE products SET price = CASE id WHEN 1 THEN 10.00 WHEN 2 THEN 20.00 WHEN 3 THEN 30.00 END WHERE id IN (1, 2, 3);
-- Keyset pagination (efficient for large datasets) SELECT * FROM orders WHERE id > 1000 -- last seen ID ORDER BY id LIMIT 20;
-- Cursor-based with composite key SELECT * FROM orders WHERE (created_at, id) > ('2024-01-01', 1000) ORDER BY created_at, id LIMIT 20;
-- Good: UNION allows index usage on each condition SELECT * FROM users WHERE email = 'a@b.com' UNION SELECT * FROM users WHERE name = 'John';
-- Transaction fails if constraint violated BEGIN; UPDATE accounts SET balance = balance - 1000 WHERE id = 1; -- Fails if balance < 1000 COMMIT;
BEGIN; -- Protected from concurrent modifications SELECT balance FROM accounts WHERE id = 1; UPDATE accounts SET balance = balance - 100 WHERE id = 1; COMMIT;
-- NOWAIT: fail immediately if locked SELECT * FROM accounts WHERE id = 1 FOR UPDATE NOWAIT;
-- SKIP LOCKED: skip locked rows (queue processing) SELECT * FROM jobs WHERE status = 'pending' FOR UPDATE SKIP LOCKED LIMIT 1;
-- Transaction-level lock (auto-released on commit) SELECT pg_advisory_xact_lock(12345);
-- Try lock (non-blocking) SELECT pg_try_advisory_lock(12345); -- returns true/false
-- Read with version SELECT id, name, price, version FROM products WHERE id = 1; -- version = 5
-- Update with version check UPDATE products SET price = 29.99, version = version + 1 WHERE id = 1 AND version = 5;
-- If rows affected = 0, conflict occurred -> retry or error
-- MySQL SET innodb_lock_wait_timeout = 5;
-- Sequential 001_create_users_table.sql 002_add_email_to_users.sql
-- +migrate Up CREATE TABLE users ( id SERIAL PRIMARY KEY, email VARCHAR(255) NOT NULL );
-- +migrate Down DROP TABLE users;
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
ALTER TABLE users ADD COLUMN IF NOT EXISTS name VARCHAR(100);
-- Step 2: Backfill data (in batches) UPDATE users SET phone = '' WHERE phone IS NULL AND id BETWEEN 1 AND 1000;
-- Step 3: Add NOT NULL constraint ALTER TABLE users ALTER COLUMN phone SET NOT NULL;
-- Older versions: requires table rewrite -- Use nullable + backfill + NOT NULL pattern instead
-- Step 2: Copy data (in batches) UPDATE users SET full_name = name WHERE full_name IS NULL;
-- Step 3: Deploy code reading both columns
-- Step 4: Deploy code writing to both columns
-- Step 5: Deploy code reading only new column
-- Step 6: Drop old column ALTER TABLE users DROP COLUMN name;
-- Note: Cannot run inside transaction -- May take longer but allows concurrent reads/writes
-- Step 2: Deploy and wait for old code to drain
-- Step 3: Drop column ALTER TABLE users DROP COLUMN old_column;
DECLARE batch_size INTEGER := 1000; rows_updated INTEGER; BEGIN LOOP UPDATE users SET email_normalized = LOWER(email) WHERE email_normalized IS NULL AND id IN ( SELECT id FROM users WHERE email_normalized IS NULL LIMIT batch_size );
GET DIAGNOSTICS rows_updated = ROW_COUNT;
EXIT WHEN rows_updated = 0;
COMMIT;
PERFORM pg_sleep(0.1); -- Reduce load
END LOOP;
END $$;
FETCH 1000 FROM batch_cursor; -- Process batch -- Repeat until no more rows