Advanced Bash Examples
This section contains advanced Bash scripting examples that demonstrate complex concepts, patterns, and real-world applications.
Process Management and IPC
Process Pool Manager
#!/bin/bash
# process_pool.sh - Manage a pool of worker processes
# Pool size is overridable via the MAX_WORKERS environment variable.
MAX_WORKERS=${MAX_WORKERS:-4}
# Named pipe used as the shared work queue; $$ makes it per-invocation.
# NOTE(review): still a predictable /tmp name — mktemp would be safer.
WORK_QUEUE="/tmp/work_queue.$$"
# PIDs of background workers, used by cleanup() to terminate them.
WORKER_PIDS=()
# Loop flag read by workers (each backgrounded worker gets its own copy).
RUNNING=true
# Cleanup function: stop all workers, remove the queue FIFO, and exit.
# Registered below for SIGINT/SIGTERM so Ctrl+C shuts the pool down cleanly.
cleanup() {
  RUNNING=false
  echo "Shutting down workers..."
  for pid in "${WORKER_PIDS[@]}"; do
    if kill -0 "$pid" 2>/dev/null; then
      kill "$pid"
      # Reap the worker so no zombie is left behind (the original killed
      # without waiting).
      wait "$pid" 2>/dev/null
    fi
  done
  rm -f "$WORK_QUEUE"
  exit 0
}
# Set up signal handlers
trap cleanup SIGINT SIGTERM
# Worker function: loop pulling one task at a time from the FIFO queue.
# Runs as a background process, so it only sees the value RUNNING had at
# fork time — cleanup() flipping RUNNING affects the parent only; workers
# actually stop because cleanup() kills them.
worker() {
local worker_id="$1"
# NOTE(review): $$ is the parent's PID in every worker; $BASHPID would be
# the worker's own PID.
echo "Worker $worker_id started (PID: $$)"
while $RUNNING; do
if [ -p "$WORK_QUEUE" ]; then
# -t 1 bounds the read itself, but opening a FIFO for reading can still
# block until a writer opens the other end.
if read -t 1 task < "$WORK_QUEUE"; then
echo "Worker $worker_id processing: $task"
# Simulate work
sleep $((RANDOM % 5 + 1))
echo "Worker $worker_id completed: $task"
fi
fi
done
echo "Worker $worker_id shutting down"
}
# Create work queue (named pipe)
mkfifo "$WORK_QUEUE"
# Start worker processes
for ((i=1; i<=MAX_WORKERS; i++)); do
worker "$i" &
WORKER_PIDS+=($!)
done
echo "Started $MAX_WORKERS workers"
echo "Send tasks by writing to: $WORK_QUEUE"
echo "Press Ctrl+C to shutdown"
# Add some sample tasks
# Each write is backgrounded because opening the FIFO for writing blocks
# until a reader opens the other end.
for task in "task1" "task2" "task3" "task4" "task5"; do
echo "$task" > "$WORK_QUEUE" &
done
# Wait for workers
# Blocks here until cleanup() runs via SIGINT/SIGTERM.
wait
```
## Distributed Task Runner
```bash
#!/bin/bash
# distributed_runner.sh - Run tasks across multiple servers
# All settings below are overridable via environment variables.
SERVERS_FILE="${SERVERS_FILE:-servers.txt}"
# NOTE(review): "~" inside quotes is not tilde-expanded by the shell; ssh
# receives the literal "~/.ssh/id_rsa" — verify ssh expands it as expected.
SSH_KEY="${SSH_KEY:-~/.ssh/id_rsa}"
PARALLEL_JOBS="${PARALLEL_JOBS:-3}"
TIMEOUT="${TIMEOUT:-300}"
# Server list format: user@hostname:port
declare -a SERVERS
# Populate the global SERVERS array from SERVERS_FILE.
# Blank lines and '#' comment lines (optionally indented) are skipped.
# Exits with status 1 when the file is missing.
load_servers() {
  if [ ! -f "$SERVERS_FILE" ]; then
    echo "Error: Servers file '$SERVERS_FILE' not found"
    exit 1
  fi
  local entry
  while IFS= read -r entry; do
    # Keep only non-empty, non-comment lines.
    if [[ -n $entry && ! $entry =~ ^[[:space:]]*# ]]; then
      SERVERS+=("$entry")
    fi
  done < "$SERVERS_FILE"
  echo "Loaded ${#SERVERS[@]} servers"
}
# Execute command on remote server
# $1 server spec "user@host[:port]", $2 command string, $3 log file path.
# Appends all output (stdout+stderr) plus an "Exit code: N" marker to the
# log file; returns the ssh/timeout exit status.
execute_remote() {
local server="$1"
local command="$2"
local output_file="$3"
# Split "user@host:port": %:* strips the port part, ##*: extracts it.
local user_host="${server%:*}"
local port="${server##*:}"
# No colon in the spec -> ##*: leaves it unchanged; default to port 22.
[ "$port" = "$server" ] && port="22"
echo "Executing on $user_host: $command" >> "$output_file"
# NOTE(review): StrictHostKeyChecking=no disables MITM protection —
# acceptable for lab use only.
timeout "$TIMEOUT" ssh -i "$SSH_KEY" -p "$port" -o ConnectTimeout=10 \
-o StrictHostKeyChecking=no "$user_host" "$command" >> "$output_file" 2>&1
local exit_code=$?
echo "Exit code: $exit_code" >> "$output_file"
return $exit_code
}
# Run command on all servers in parallel
# Fans out execute_remote jobs, capping concurrency at PARALLEL_JOBS by
# always waiting on the oldest outstanding job (a sliding window).
run_distributed() {
local command="$1"
local output_dir="results_$(date +%Y%m%d_%H%M%S)"
mkdir -p "$output_dir"
echo "Running command on ${#SERVERS[@]} servers: $command"
echo "Output directory: $output_dir"
local pids=()
local server_index=0
for server in "${SERVERS[@]}"; do
# One log per server, indexed to match SERVERS order (generate_summary
# relies on this numbering to map logs back to server names).
local output_file="$output_dir/server_${server_index}.log"
execute_remote "$server" "$command" "$output_file" &
pids+=($!)
((server_index++))
# Limit parallel jobs
if [ ${#pids[@]} -ge $PARALLEL_JOBS ]; then
wait "${pids[0]}"
pids=("${pids[@]:1}")
fi
done
# Wait for remaining jobs
for pid in "${pids[@]}"; do
wait "$pid"
done
echo "All tasks completed. Results in: $output_dir"
# Generate summary
generate_summary "$output_dir"
}
# Build and display a per-server success/failure summary.
# Scans each server_N.log in $1 for the "Exit code: 0" marker written by
# execute_remote; the report goes to $1/summary.txt and is echoed back.
generate_summary() {
  local results_dir="$1"
  local summary_path="$results_dir/summary.txt"
  {
    echo "Distributed Execution Summary"
    echo "============================"
    echo "Executed at: $(date)"
    echo "Total servers: ${#SERVERS[@]}"
    echo
    local ok=0
    local bad=0
    local log_file idx target
    for log_file in "$results_dir"/server_*.log; do
      # Map server_<N>.log back to SERVERS[N].
      idx=$(basename "$log_file" .log | cut -d_ -f2)
      target="${SERVERS[$idx]}"
      if grep -q "Exit code: 0" "$log_file"; then
        echo "✓ $target - SUCCESS"
        ok=$((ok + 1))
      else
        echo "✗ $target - FAILED"
        bad=$((bad + 1))
      fi
    done
    echo
    echo "Results: $ok successful, $bad failed"
  } > "$summary_path"
  cat "$summary_path"
}
# Main execution
# With no arguments, print usage plus the tunable environment variables.
if [ $# -eq 0 ]; then
echo "Usage: $0 <command>"
echo "Environment variables:"
echo " SERVERS_FILE - File containing server list (default: servers.txt)"
echo " SSH_KEY - SSH private key (default: ~/.ssh/id_rsa)"
echo " PARALLEL_JOBS - Number of parallel jobs (default: 3)"
echo " TIMEOUT - Command timeout in seconds (default: 300)"
exit 1
fi
load_servers
run_distributed "$*"

Network Programming
HTTP Server in Bash
#!/bin/bash
# http_server.sh - Simple HTTP server in pure Bash
# Tunables (env): PORT, DOCUMENT_ROOT, LOG_FILE.
PORT="${PORT:-8080}"
DOCUMENT_ROOT="${DOCUMENT_ROOT:-./www}"
LOG_FILE="${LOG_FILE:-access.log}"
# MIME types
# Maps file extension -> Content-Type header value; extensions not listed
# fall back to application/octet-stream (see get_mime_type).
declare -A MIME_TYPES=(
["html"]="text/html"
["css"]="text/css"
["js"]="application/javascript"
["json"]="application/json"
["txt"]="text/plain"
["png"]="image/png"
["jpg"]="image/jpeg"
["gif"]="image/gif"
)
# Append one access-log line (Apache-like format) to LOG_FILE.
# Args: client IP, method, path, status code, response size in bytes.
log_request() {
  local client_ip="$1" method="$2" path="$3" status="$4" size="$5"
  local stamp
  stamp=$(date '+%d/%b/%Y:%H:%M:%S %z')
  printf '%s %s "%s %s HTTP/1.1" %s %s\n' \
    "$stamp" "$client_ip" "$method" "$path" "$status" "$size" >> "$LOG_FILE"
}
# Map a filename's extension to its MIME type via the MIME_TYPES table;
# unknown extensions default to application/octet-stream.
get_mime_type() {
  local filename="$1"
  local ext="${filename##*.}"
  local mime="${MIME_TYPES[$ext]}"
  printf '%s\n' "${mime:-application/octet-stream}"
}
# Emit a complete HTTP/1.1 response (status line, headers, blank line,
# body) on stdout.  Args: status ("200 OK"), content type, body string.
send_response() {
  local status="$1" content_type="$2" body="$3"
  printf 'HTTP/1.1 %s\n' "$status"
  printf 'Content-Type: %s\n' "$content_type"
  printf 'Content-Length: %s\n' "${#body}"
  printf 'Connection: close\n'
  printf '\n'
  printf '%s' "$body"
}
# Handle HTTP request
# Reads one request from stdin, serves the file from DOCUMENT_ROOT and
# logs the result.  $1 is the client IP (used for logging only).
handle_request() {
  local client_ip="$1"
  local method path protocol header
  # Request line: METHOD PATH PROTOCOL
  read -r method path protocol
  # Drain headers until the blank line; strip the trailing CR.
  # (Header values are currently unused.)
  while read -r header && [ -n "$header" ]; do
    header="${header%$'\r'}"
  done
  # Remove query string from path
  path="${path%%\?*}"
  # Security: reject any path containing ".." outright.  The original
  # single-pass strip of "../" (${path//..\/}) could be bypassed — e.g.
  # "....//" collapses back into "../" after one removal pass.  Rejecting
  # is conservative but safe.
  if [[ "$path" == *..* ]]; then
    local error_content="<html><body><h1>403 Forbidden</h1></body></html>"
    send_response "403 Forbidden" "text/html" "$error_content"
    log_request "$client_ip" "$method" "$path" "403" "${#error_content}"
    return
  fi
  # Default to index.html for directory requests
  if [[ "$path" == */ ]]; then
    path="${path}index.html"
  fi
  local file_path="$DOCUMENT_ROOT$path"
  if [ -f "$file_path" ]; then
    local content_type content
    content_type=$(get_mime_type "$file_path")
    # NOTE(review): $(cat) mangles binary files (NUL bytes, trailing
    # newlines) — fine for text, not for the image types in MIME_TYPES.
    content=$(cat "$file_path")
    send_response "200 OK" "$content_type" "$content"
    log_request "$client_ip" "$method" "$path" "200" "${#content}"
  else
    local error_content="<html><body><h1>404 Not Found</h1><p>The requested file was not found.</p></body></html>"
    send_response "404 Not Found" "text/html" "$error_content"
    log_request "$client_ip" "$method" "$path" "404" "${#error_content}"
  fi
}
# Start server
# Creates DOCUMENT_ROOT (with a default index.html) and then accepts
# connections forever, re-invoking this script as "handle_connection"
# for each client via nc or socat.
start_server() {
echo "Starting HTTP server on port $PORT"
echo "Document root: $DOCUMENT_ROOT"
echo "Log file: $LOG_FILE"
# Create document root if it doesn't exist
mkdir -p "$DOCUMENT_ROOT"
# Create a simple index.html if it doesn't exist
# (unquoted EOF delimiter: $(date) below expands at creation time)
if [ ! -f "$DOCUMENT_ROOT/index.html" ]; then
cat > "$DOCUMENT_ROOT/index.html" << EOF
<!DOCTYPE html>
<html>
<head>
<title>Bash HTTP Server</title>
</head>
<body>
<h1>Welcome to Bash HTTP Server</h1>
<p>Server started at: $(date)</p>
<p>This is a simple HTTP server written in Bash.</p>
</body>
</html>
EOF
fi
# Start listening
while true; do
# Use netcat to listen for connections
# NOTE(review): "nc -e" exists only in some netcat variants (and -p
# semantics differ between them); the socat branch is more portable.
if command -v nc >/dev/null 2>&1; then
nc -l -p "$PORT" -e "$0" handle_connection
elif command -v socat >/dev/null 2>&1; then
socat TCP-LISTEN:$PORT,reuseaddr,fork EXEC:"$0 handle_connection"
else
echo "Error: netcat or socat required"
exit 1
fi
done
}
# Handle individual connection
# SOCAT_PEERADDR is exported by socat's EXEC; under nc no peer address is
# available, hence the "unknown" fallback.
handle_connection() {
local client_ip="${SOCAT_PEERADDR:-unknown}"
handle_request "$client_ip"
}
# Main execution
# "handle_connection" is the internal re-entry point used by nc/socat.
case "${1:-start}" in
start)
start_server
;;
handle_connection)
handle_connection
;;
*)
echo "Usage: $0 [start]"
exit 1
;;
esac

Network Scanner
#!/bin/bash
# network_scanner.sh - Network discovery and port scanning
# Usage: network_scanner.sh <network-or-host> [ping|port|full]
NETWORK="${1:-192.168.1.0/24}"
# Per-probe timeout in seconds (ping -W / TCP connect attempts).
TIMEOUT="${TIMEOUT:-1}"
# Upper bound on concurrent background probes.
THREADS="${THREADS:-50}"
# Common ports to scan
COMMON_PORTS=(22 23 25 53 80 110 143 443 993 995 3389 5432 3306)
# Expand CIDR notation into its usable host addresses, one per line
# (network and broadcast addresses are excluded).  A bare address with no
# "/prefix" is echoed back as a single host.
parse_network() {
  local cidr="$1"
  local base_ip="${cidr%/*}"
  local bits="${cidr#*/}"
  # No "/" present: treat the argument as a single host.
  if [ "$bits" = "$cidr" ]; then
    echo "$base_ip"
    return
  fi
  # Pack the dotted quad into a 32-bit integer.
  local o1 o2 o3 o4
  IFS='.' read -r o1 o2 o3 o4 <<< "$base_ip"
  local addr=$(( (o1 << 24) | (o2 << 16) | (o3 << 8) | o4 ))
  local netmask=$(( 0xFFFFFFFF << (32 - bits) ))
  local net=$(( addr & netmask ))
  local bcast=$(( net | (0xFFFFFFFF >> bits) ))
  # Emit every host between network+1 and broadcast-1.
  local host
  for (( host = net + 1; host < bcast; host++ )); do
    printf '%d.%d.%d.%d\n' \
      $(( (host >> 24) & 0xFF )) $(( (host >> 16) & 0xFF )) \
      $(( (host >> 8) & 0xFF )) $(( host & 0xFF ))
  done
}
# Ping sweep to discover live hosts
# Probes every address in $1 concurrently (bounded by THREADS) and prints
# the live ones, sorted.  Progress ("Host alive: ...") goes to stdout too.
ping_sweep() {
  local network="$1"
  local output_file="/tmp/live_hosts.$$"
  echo "Performing ping sweep on $network..."
  # Read from process substitution rather than a pipeline: in the original
  # `parse_network | while ...` the loop body ran in a subshell, so the
  # background ping jobs were NOT children of this shell and the final
  # `wait` below returned before the probes finished, truncating results.
  local ip
  while read -r ip; do
    {
      if ping -c 1 -W "$TIMEOUT" "$ip" >/dev/null 2>&1; then
        echo "$ip" >> "$output_file"
        echo "Host alive: $ip"
      fi
    } &
    # Limit concurrent processes
    (($(jobs -r | wc -l) >= THREADS)) && wait
  done < <(parse_network "$network")
  wait
  if [ -f "$output_file" ]; then
    sort -V "$output_file"
    rm "$output_file"
  fi
}
# Port scan a single host
# $1 host, remaining args = ports to probe.  Uses bash's /dev/tcp
# pseudo-device so no external scanner is needed; `timeout` bounds each
# connect attempt, THREADS bounds concurrency.
port_scan() {
local host="$1"
local ports=("${@:2}")
echo "Scanning ports on $host..."
for port in "${ports[@]}"; do
{
if timeout "$TIMEOUT" bash -c "echo >/dev/tcp/$host/$port" 2>/dev/null; then
echo "$host:$port - OPEN"
# Try to identify service
# (getent consults /etc/services; empty when the port is unlisted)
local service=$(getent services "$port/tcp" 2>/dev/null | awk '{print $1}')
[ -n "$service" ] && echo " Service: $service"
fi
} &
# Limit concurrent processes
(($(jobs -r | wc -l) >= THREADS)) && wait
done
wait
}
# Comprehensive network scan
# Phase 1 discovers live hosts (ping_sweep), phase 2 port-scans each one,
# then a summary report is written to <output_dir>/summary.txt and echoed.
full_scan() {
  local network="$1"
  local output_dir="scan_results_$(date +%Y%m%d_%H%M%S)"
  mkdir -p "$output_dir"
  echo "Starting comprehensive network scan..."
  echo "Network: $network"
  echo "Output directory: $output_dir"
  # Discover live hosts
  echo "Phase 1: Host discovery"
  local live_hosts=()
  local host
  while IFS= read -r host; do
    live_hosts+=("$host")
  done < <(ping_sweep "$network")
  echo "Found ${#live_hosts[@]} live hosts"
  # One host per line: the original `echo "${live_hosts[@]}"` joined all
  # hosts onto a single line, which made the file useless as tool input.
  printf '%s\n' "${live_hosts[@]}" > "$output_dir/live_hosts.txt"
  # Port scan each live host
  echo "Phase 2: Port scanning"
  for host in "${live_hosts[@]}"; do
    echo "Scanning $host..."
    port_scan "$host" "${COMMON_PORTS[@]}" > "$output_dir/${host}_ports.txt"
  done
  # Generate summary report
  {
    echo "Network Scan Report"
    echo "=================="
    echo "Scan date: $(date)"
    echo "Network: $network"
    echo "Live hosts: ${#live_hosts[@]}"
    echo
    echo "Host Summary:"
    echo "============"
    local open_ports
    for host in "${live_hosts[@]}"; do
      # grep -c already prints "0" on no-match (while exiting 1), so the
      # original `|| echo "0"` yielded the two-line value "0\n0".  Only
      # fall back when grep could not read the file at all.
      open_ports=$(grep -c "OPEN" "$output_dir/${host}_ports.txt" 2>/dev/null) || open_ports=0
      echo "$host - $open_ports open ports"
    done
    echo
    echo "Open Ports by Service:"
    echo "====================="
    local port service
    for port in "${COMMON_PORTS[@]}"; do
      local hosts_with_port=()
      for host in "${live_hosts[@]}"; do
        if grep -q "$host:$port - OPEN" "$output_dir/${host}_ports.txt" 2>/dev/null; then
          hosts_with_port+=("$host")
        fi
      done
      if [ ${#hosts_with_port[@]} -gt 0 ]; then
        # A pipeline's exit status is the last command's (awk, always 0),
        # so the original `|| echo "unknown"` never fired; default the
        # value explicitly instead.
        service=$(getent services "$port/tcp" 2>/dev/null | awk '{print $1}')
        echo "Port $port (${service:-unknown}): ${hosts_with_port[*]}"
      fi
    done
  } > "$output_dir/summary.txt"
  echo "Scan completed. Results in: $output_dir"
  cat "$output_dir/summary.txt"
}
# Main execution
# The mode is the SECOND positional argument; the first is the
# network/host (captured into NETWORK at the top of the script).
case "${2:-full}" in
ping)
ping_sweep "$NETWORK"
;;
port)
if [ $# -lt 2 ]; then
echo "Usage: $0 <host> port [port...]"
exit 1
fi
# NOTE(review): extra port arguments are ignored — COMMON_PORTS is
# always used, despite what the usage text suggests.
port_scan "$1" "${COMMON_PORTS[@]}"
;;
full)
full_scan "$NETWORK"
;;
*)
echo "Usage: $0 <network> [ping|port|full]"
echo "Examples:"
echo " $0 192.168.1.0/24 ping # Ping sweep only"
echo " $0 192.168.1.1 port # Port scan single host"
echo " $0 192.168.1.0/24 full # Full network scan"
exit 1
;;
esac

Data Processing and Analysis
JSON Processor
#!/bin/bash
# json_processor.sh - JSON processing without external dependencies
# Simple JSON parser (basic implementation)
# Extract the scalar value for "$2" from the JSON string "$1".
# Handles string values (including embedded spaces) and bare numbers;
# nested objects/arrays are out of scope.
parse_json() {
  local json="$1"
  local key="$2"
  # The original stripped ALL whitespace first (tr -d ' \t\n\r'), which
  # corrupted string values containing spaces ("John Doe" -> "JohnDoe").
  # Instead, tolerate optional whitespace around the colon in the regex.
  local pattern="\"$key\"[[:space:]]*:[[:space:]]*\"([^\"]*)\""
  if [[ $json =~ $pattern ]]; then
    echo "${BASH_REMATCH[1]}"
  else
    # Fall back to a bare numeric value (optional sign and decimals).
    pattern="\"$key\"[[:space:]]*:[[:space:]]*(-?[0-9]+(\.[0-9]+)?)"
    if [[ $json =~ $pattern ]]; then
      echo "${BASH_REMATCH[1]}"
    fi
  fi
}
# Extract array elements
parse_json_array() {
local json="$1"
local array_key="$2"
# Extract array content
local pattern="\"$array_key\":\[([^\]]*)\]"
if [[ $json =~ $pattern ]]; then
local array_content="${BASH_REMATCH[1]}"
# Split by comma and clean up
IFS=',' read -ra elements <<< "$array_content"
for element in "${elements[@]}"; do
element=$(echo "$element" | sed 's/^[[:space:]]*"//;s/"[[:space:]]*$//')
echo "$element"
done
fi
}
# Create JSON object
# Serializes the associative array named by $1 (via nameref) into a flat
# JSON object; every value is emitted as a string.  Key order follows
# bash's hash order and is not guaranteed.
create_json() {
  local -n src=$1
  local out="{"
  local sep=""
  local k
  for k in "${!src[@]}"; do
    out+="${sep}\"$k\":\"${src[$k]}\""
    sep=","
  done
  out+="}"
  echo "$out"
}
# Process JSON file
# $1 file, $2 operation (get|array|keys|validate|pretty), $3 key (used by
# get/array).  validate and pretty shell out to python3 when available.
process_json_file() {
local file="$1"
local operation="$2"
local key="$3"
if [ ! -f "$file" ]; then
echo "Error: File '$file' not found"
return 1
fi
local json_content=$(cat "$file")
case "$operation" in
get)
parse_json "$json_content" "$key"
;;
array)
parse_json_array "$json_content" "$key"
;;
keys)
# Extract all keys
# (regex-based: may also match quoted strings followed by ':' inside
# values — good enough for simple documents)
echo "$json_content" | grep -o '"[^"]*"[[:space:]]*:' | sed 's/"//g;s/[[:space:]]*://'
;;
validate)
# Basic JSON validation
# NOTE(review): requires python3; without it, valid JSON is reported
# as invalid.
if echo "$json_content" | python3 -m json.tool >/dev/null 2>&1; then
echo "Valid JSON"
else
echo "Invalid JSON"
fi
;;
pretty)
# Pretty print JSON
if command -v python3 >/dev/null 2>&1; then
echo "$json_content" | python3 -m json.tool
else
echo "$json_content"
fi
;;
*)
echo "Unknown operation: $operation"
echo "Available operations: get, array, keys, validate, pretty"
return 1
;;
esac
}
# Example usage and testing
# Demo runs only when the script is executed directly (not sourced).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
# Create sample JSON file for testing
# (quoted 'EOF' delimiter: content is written literally, no expansion)
cat > sample.json << 'EOF'
{
"name": "John Doe",
"age": 30,
"city": "New York",
"hobbies": ["reading", "swimming", "coding"],
"active": true,
"address": {
"street": "123 Main St",
"zip": "10001"
}
}
EOF
# With no arguments, run the built-in demo; otherwise forward the
# arguments straight to process_json_file.
if [ $# -eq 0 ]; then
echo "JSON Processor Demo"
echo "=================="
echo
echo "Sample JSON content:"
cat sample.json
echo
echo "Extracting 'name':"
process_json_file sample.json get name
echo
echo "Extracting 'age':"
process_json_file sample.json get age
echo
echo "Extracting 'hobbies' array:"
process_json_file sample.json array hobbies
echo
echo "All keys:"
process_json_file sample.json keys
echo
echo "Validating JSON:"
process_json_file sample.json validate
else
process_json_file "$@"
fi
fi

Data Aggregator
#!/bin/bash
# data_aggregator.sh - Aggregate and analyze data from multiple sources
# metrics[key]  = running sum of numeric samples (kept as a string, via bc)
declare -A metrics
# counters[key] = number of samples folded into metrics[key]
declare -A counters
# Output format used by calculate_stats: table, json or csv.
OUTPUT_FORMAT="${OUTPUT_FORMAT:-table}"
# Add metric
# Folds one numeric sample into metrics[$1] and bumps counters[$1].
# Non-numeric values are silently ignored.  Requires bc for the
# arbitrary-precision addition.
add_metric() {
  local name="$1"
  local sample="$2"
  # Accept integers or plain decimals only.
  [[ $sample =~ ^[0-9]+(\.[0-9]+)?$ ]] || return 0
  local current="${metrics[$name]:-0}"
  metrics[$name]=$(echo "$current + $sample" | bc -l)
  counters[$name]=$(( ${counters[$name]:-0} + 1 ))
}
# Process log file
# $1 file path, $2 format (apache|nginx|csv).  Feeds numeric fields into
# add_metric.  The regexes are heuristics, not full log parsers.
process_log_file() {
local file="$1"
local format="$2"
echo "Processing log file: $file (format: $format)"
case "$format" in
apache)
# Apache access log format
while IFS= read -r line; do
# Extract response time if available
# (assumes a trailing integer on the line is a time/duration —
# TODO confirm against the actual log format in use)
if [[ $line =~ ([0-9]+)$ ]]; then
add_metric "response_time" "${BASH_REMATCH[1]}"
fi
# Extract status code
# (3-digit number following the quoted request string)
if [[ $line =~ \"[^\"]*\"[[:space:]]+([0-9]{3}) ]]; then
add_metric "status_${BASH_REMATCH[1]}" 1
fi
# Extract bytes sent
if [[ $line =~ [[:space:]]([0-9]+)[[:space:]]+\"[^\"]*\"[[:space:]]*$ ]]; then
add_metric "bytes_sent" "${BASH_REMATCH[1]}"
fi
done < "$file"
;;
nginx)
# Nginx access log format
while IFS= read -r line; do
# Similar processing for nginx logs
# (only the 3-digit status code is extracted here)
if [[ $line =~ [[:space:]]([0-9]{3})[[:space:]] ]]; then
add_metric "status_${BASH_REMATCH[1]}" 1
fi
done < "$file"
;;
csv)
# CSV format - assume first line is header
# NOTE(review): naive comma split — quoted fields containing commas
# will be broken apart.
local headers=()
local line_num=0
while IFS=',' read -ra fields; do
((line_num++))
if [ $line_num -eq 1 ]; then
headers=("${fields[@]}")
else
# Aggregate each numeric column under its header name.
for i in "${!fields[@]}"; do
local header="${headers[i]}"
local value="${fields[i]}"
if [[ $value =~ ^[0-9]+(\.[0-9]+)?$ ]]; then
add_metric "$header" "$value"
fi
done
fi
done < "$file"
;;
*)
echo "Unknown format: $format"
return 1
;;
esac
}
# Calculate statistics
# Derives totals and averages from the global metrics/counters maps and
# prints them in OUTPUT_FORMAT (table|json|csv).  Requires bc.
calculate_stats() {
declare -A averages
declare -A totals
for key in "${!metrics[@]}"; do
totals["$key"]="${metrics["$key"]}"
# counters is always >0 for keys added via add_metric; guard anyway.
if [ "${counters["$key"]}" -gt 0 ]; then
averages["$key"]=$(echo "scale=2; ${metrics["$key"]} / ${counters["$key"]}" | bc -l)
fi
done
case "$OUTPUT_FORMAT" in
table)
printf "%-20s %15s %15s %10s\n" "Metric" "Total" "Average" "Count"
printf "%-20s %15s %15s %10s\n" "------" "-----" "-------" "-----"
# Keys are sorted for stable, readable table output.
for key in $(printf '%s\n' "${!metrics[@]}" | sort); do
printf "%-20s %15.2f %15.2f %10d\n" \
"$key" "${totals["$key"]}" "${averages["$key"]}" "${counters["$key"]}"
done
;;
json)
echo "{"
local first=true
# Comma separators are emitted before every entry except the first.
for key in "${!metrics[@]}"; do
if [ "$first" = true ]; then
first=false
else
echo -n ","
fi
printf ' "%s": {"total": %.2f, "average": %.2f, "count": %d}' \
"$key" "${totals["$key"]}" "${averages["$key"]}" "${counters["$key"]}"
done
echo
echo "}"
;;
csv)
echo "metric,total,average,count"
for key in "${!metrics[@]}"; do
printf "%s,%.2f,%.2f,%d\n" \
"$key" "${totals["$key"]}" "${averages["$key"]}" "${counters["$key"]}"
done
;;
esac
}
# Generate report
# Writes a timestamped report (header plus calculate_stats output) to the
# file named by $1.  Relies on bash's dynamic scoping: processed_files is
# a local of main(), visible here because main() is the caller.
generate_report() {
  local report_path="$1"
  {
    printf '%s\n' "Data Aggregation Report"
    printf '%s\n' "======================"
    printf 'Generated: %s\n' "$(date)"
    printf 'Files processed: %s\n' "${#processed_files[@]}"
    printf '\n'
    calculate_stats
  } > "$report_path"
  echo "Report saved to: $report_path"
}
# Main processing function
# Detects each file's format from its name, aggregates metrics from every
# readable file, prints the results, and writes a timestamped report.
main() {
local files=("$@")
local processed_files=()
if [ ${#files[@]} -eq 0 ]; then
echo "Usage: $0 <file1> [file2] ..."
echo "Environment variables:"
echo " OUTPUT_FORMAT - Output format: table, json, csv (default: table)"
exit 1
fi
for file in "${files[@]}"; do
if [ ! -f "$file" ]; then
echo "Warning: File '$file' not found, skipping"
continue
fi
# Detect file format based on extension or content
local format="apache" # default
case "$file" in
*.csv) format="csv" ;;
*nginx*) format="nginx" ;;
*apache*) format="apache" ;;
esac
process_log_file "$file" "$format"
processed_files+=("$file")
done
if [ ${#processed_files[@]} -eq 0 ]; then
echo "No files were processed"
exit 1
fi
echo
echo "Aggregation Results:"
echo "==================="
calculate_stats
# Generate detailed report
local report_file="aggregation_report_$(date +%Y%m%d_%H%M%S).txt"
generate_report "$report_file"
}
main "$@"

Security and Encryption
Password Manager
#!/bin/bash
# password_manager.sh - Simple encrypted password manager
# The vault is a GPG-symmetric-encrypted file of
# "service:username:password:notes" lines; VAULT_FILE is env-overridable.
VAULT_FILE="${VAULT_FILE:-$HOME/.password_vault.gpg}"
# Plaintext working copy; removed on every exit by the trap below.
# NOTE(review): predictable /tmp name under the default umask — mktemp
# with restrictive permissions would be safer for secrets.
TEMP_FILE="/tmp/vault_temp.$$"
# Cleanup function
cleanup() {
rm -f "$TEMP_FILE"
}
trap cleanup EXIT
# Check dependencies
# Aborts the script if the gpg binary is not on PATH.
check_dependencies() {
  command -v gpg >/dev/null 2>&1 && return 0
  echo "Error: GPG is required but not installed"
  exit 1
}
# Initialize vault
# Refuses to overwrite an existing vault; writes a commented header to the
# working file and encrypts it (gpg prompts for the passphrase).
init_vault() {
if [ -f "$VAULT_FILE" ]; then
echo "Vault already exists at: $VAULT_FILE"
return 1
fi
echo "Initializing new password vault..."
echo "# Password Vault - Created $(date)" > "$TEMP_FILE"
echo "# Format: service:username:password:notes" >> "$TEMP_FILE"
encrypt_vault
echo "Vault initialized at: $VAULT_FILE"
}
# Encrypt vault
# Encrypts TEMP_FILE into VAULT_FILE with symmetric AES256 (gpg prompts
# for the passphrase).  Exits on failure rather than leaving a stale vault.
# NOTE(review): gpg may prompt before overwriting an existing VAULT_FILE;
# confirm whether --yes/--batch is needed for non-interactive use.
encrypt_vault() {
if ! gpg --symmetric --cipher-algo AES256 --output "$VAULT_FILE" "$TEMP_FILE"; then
echo "Error: Failed to encrypt vault"
exit 1
fi
}
# Decrypt vault
# Decrypts VAULT_FILE into TEMP_FILE for the caller to read or modify.
# Exits if the vault is missing or the passphrase is wrong.
decrypt_vault() {
if [ ! -f "$VAULT_FILE" ]; then
echo "Error: Vault file not found. Run 'init' first."
exit 1
fi
if ! gpg --decrypt --output "$TEMP_FILE" "$VAULT_FILE" 2>/dev/null; then
echo "Error: Failed to decrypt vault (wrong password?)"
exit 1
fi
}
# Add password entry
# Usage: add_entry <service> <username> [password] [notes]
# Generates a password when none is given.  Fields must not contain ':'
# (the vault's separator) — this is not validated.
add_entry() {
local service="$1"
local username="$2"
local password="$3"
local notes="$4"
if [ -z "$service" ] || [ -z "$username" ]; then
echo "Error: Service and username are required"
return 1
fi
decrypt_vault
# Check if entry already exists
# NOTE(review): service/username are interpolated into a grep regex, so
# regex metacharacters in either field will misbehave.
if grep -q "^$service:$username:" "$TEMP_FILE"; then
echo "Entry already exists. Use 'update' to modify."
return 1
fi
# Generate password if not provided
# (the generated password is echoed so the user can record it)
if [ -z "$password" ]; then
password=$(generate_password)
echo "Generated password: $password"
fi
echo "$service:$username:$password:$notes" >> "$TEMP_FILE"
encrypt_vault
echo "Entry added for $service ($username)"
}
# Generate secure password
# Emits a random password of the requested length (default 16) drawn
# from letters, digits, and common punctuation, sourced from /dev/urandom.
generate_password() {
  local pw_len="${1:-16}"
  tr -dc 'A-Za-z0-9!@#$%^&*' < /dev/urandom | head -c "$pw_len"
}
# Search entries
# Case-insensitive match of $1 across whole vault lines; passwords are
# not revealed here (use 'show' for that).
search_entries() {
local query="$1"
decrypt_vault
echo "Search results for: $query"
echo "=========================="
grep -i "$query" "$TEMP_FILE" | grep -v '^#' | while IFS=':' read -r service username password notes; do
echo "Service: $service"
echo "Username: $username"
echo "Password: [hidden - use 'show' to reveal]"
echo "Notes: $notes"
echo "---"
done
}
# Show specific entry
# Prints all fields, including the plaintext password, for the exact
# service+username pair.
show_entry() {
local service="$1"
local username="$2"
decrypt_vault
local entry=$(grep "^$service:$username:" "$TEMP_FILE")
if [ -n "$entry" ]; then
IFS=':' read -r service username password notes <<< "$entry"
echo "Service: $service"
echo "Username: $username"
echo "Password: $password"
echo "Notes: $notes"
else
echo "Entry not found: $service ($username)"
fi
}
# List all entries
# Prints "service (username)" for every non-comment vault line; the
# vault is decrypted to TEMP_FILE first.
list_entries() {
  decrypt_vault
  echo "Password Vault Entries"
  echo "====================="
  local service username _pw _notes
  while IFS=':' read -r service username _pw _notes; do
    # Skip the '#'-prefixed header lines written at init time.
    [[ $service == \#* ]] && continue
    echo "$service ($username)"
  done < "$TEMP_FILE"
}
# Update entry
# Usage: update_entry <service> <username> [new_password] [new_notes]
# Empty/omitted fields keep their old values — so a password cannot be
# set to the empty string via this path.
update_entry() {
local service="$1"
local username="$2"
local new_password="$3"
local new_notes="$4"
decrypt_vault
if ! grep -q "^$service:$username:" "$TEMP_FILE"; then
echo "Entry not found: $service ($username)"
return 1
fi
# Create updated file
# Rewrites every line, substituting fields only on the matching entry.
local updated_file="/tmp/vault_updated.$$"
while IFS=':' read -r s u p n; do
if [ "$s" = "$service" ] && [ "$u" = "$username" ]; then
echo "$s:$u:${new_password:-$p}:${new_notes:-$n}"
else
echo "$s:$u:$p:$n"
fi
done < "$TEMP_FILE" > "$updated_file"
mv "$updated_file" "$TEMP_FILE"
encrypt_vault
echo "Entry updated: $service ($username)"
}
# Delete entry
# Removes the exact service+username line and re-encrypts the vault.
delete_entry() {
local service="$1"
local username="$2"
decrypt_vault
if ! grep -q "^$service:$username:" "$TEMP_FILE"; then
echo "Entry not found: $service ($username)"
return 1
fi
# Create file without the entry
grep -v "^$service:$username:" "$TEMP_FILE" > "/tmp/vault_filtered.$$"
mv "/tmp/vault_filtered.$$" "$TEMP_FILE"
encrypt_vault
echo "Entry deleted: $service ($username)"
}
# Show usage
# Help text; the unquoted EOF delimiter lets $0 expand in the examples.
show_usage() {
cat << EOF
Password Manager - Encrypted password storage
Usage: $0 <command> [arguments]
Commands:
init Initialize new vault
add <service> <username> [password] [notes] Add new entry
show <service> <username> Show specific entry
search <query> Search entries
list List all entries
update <service> <username> [password] [notes] Update entry
delete <service> <username> Delete entry
generate [length] Generate secure password
Environment Variables:
VAULT_FILE Path to vault file (default: ~/.password_vault.gpg)
Examples:
$0 init
$0 add gmail john.doe@gmail.com
$0 show gmail john.doe@gmail.com
$0 search gmail
$0 generate 20
EOF
}
# Main execution
# Simple subcommand dispatcher; unknown or missing commands print usage.
check_dependencies
case "${1:-help}" in
init)
init_vault
;;
add)
add_entry "$2" "$3" "$4" "$5"
;;
show)
show_entry "$2" "$3"
;;
search)
search_entries "$2"
;;
list)
list_entries
;;
update)
update_entry "$2" "$3" "$4" "$5"
;;
delete)
delete_entry "$2" "$3"
;;
generate)
generate_password "$2"
;;
help|--help|-h)
show_usage
;;
*)
echo "Unknown command: $1"
show_usage
exit 1
;;
esac

These advanced examples demonstrate sophisticated Bash scripting techniques including process management, network programming, data processing, and security implementations. Each example showcases real-world applications and advanced programming patterns that can be adapted for various use cases.