Episode 264: Cloud Storage Integration

Learning Objectives

  • Understand the types and characteristics of cloud storage
  • Master the use of object storage
  • Become familiar with configuring block storage and file storage
  • Learn how to integrate database storage
  • Be able to implement storage backup and recovery

Core Topics

1. Cloud Storage Overview

1.1 Cloud Storage Types

| Storage Type | Description | Characteristics | Typical Use Cases |
| --- | --- | --- | --- |
| Object storage | Stores unstructured data | Virtually unlimited scale, high availability, low cost | Images, videos, backup files |
| Block storage | Provides block-level storage devices | Low latency, high performance | Databases, application data |
| File storage | Provides a file-system interface | Shared access, easy to use | Shared files, web content |
| Archive storage | Long-term storage for cold data | Low cost, high retrieval latency | Compliance backups, historical data |

1.2 Comparison of Major Cloud Storage Services

| Cloud Platform | Object Storage | Block Storage | File Storage | Archive Storage |
| --- | --- | --- | --- | --- |
| AWS | S3 | EBS | EFS | Glacier |
| Azure | Blob Storage | Disk | Files | Archive |
| GCP | Cloud Storage | Persistent Disk | Filestore | Coldline |
| Alibaba Cloud | OSS | Cloud Disk | NAS | Archive Storage |

2. AWS Storage Services

2.1 S3 Object Storage

# Install the AWS CLI
pip install awscli

# Configure AWS credentials
aws configure

# Create an S3 bucket
aws s3 mb s3://my-unique-bucket-name

# Upload a file
aws s3 cp file.txt s3://my-unique-bucket-name/

# Download a file
aws s3 cp s3://my-unique-bucket-name/file.txt .

# List bucket contents
aws s3 ls s3://my-unique-bucket-name/

# Sync a directory
aws s3 sync ./local-dir s3://my-unique-bucket-name/remote-dir/

# Set a bucket policy (this example grants public read access to all objects)
cat > bucket-policy.json << 'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "PublicReadGetObject",
      "Effect": "Allow",
      "Principal": "*",
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::my-unique-bucket-name/*"
    }
  ]
}
EOF

aws s3api put-bucket-policy --bucket my-unique-bucket-name --policy file://bucket-policy.json

# Configure a lifecycle policy (expire old noncurrent versions, abort stale multipart uploads)
cat > lifecycle-policy.json << 'EOF'
{
  "Rules": [
    {
      "ID": "DeleteOldVersions",
      "Status": "Enabled",
      "Prefix": "",
      "NoncurrentVersionExpiration": {
        "NoncurrentDays": 30
      },
      "AbortIncompleteMultipartUpload": {
        "DaysAfterInitiation": 7
      }
    }
  ]
}
EOF

aws s3api put-bucket-lifecycle-configuration --bucket my-unique-bucket-name --lifecycle-configuration file://lifecycle-policy.json

# Enable versioning
aws s3api put-bucket-versioning --bucket my-unique-bucket-name --versioning-configuration Status=Enabled

# Set default bucket encryption
aws s3api put-bucket-encryption --bucket my-unique-bucket-name --server-side-encryption-configuration '{
  "Rules": [
    {
      "ApplyServerSideEncryptionByDefault": {
        "SSEAlgorithm": "AES256"
      }
    }
  ]
}'
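
Beyond bucket-wide policies, S3 can also grant temporary access to a single object with a presigned URL. A minimal sketch, reusing the bucket name from above (the key `file.txt` is just an example object):

# Generate a presigned URL valid for one hour; anyone holding the URL
# can GET the object without AWS credentials
aws s3 presign s3://my-unique-bucket-name/file.txt --expires-in 3600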

2.2 EBS Block Storage

# Create an EBS volume
aws ec2 create-volume \
  --availability-zone us-east-1a \
  --volume-type gp3 \
  --size 20

# List available volumes
aws ec2 describe-volumes

# Attach the volume to an instance
aws ec2 attach-volume \
  --volume-id vol-1234567890abcdef0 \
  --instance-id i-1234567890abcdef0 \
  --device /dev/sdf

# Format and mount the volume on the instance
# (on Xen instances /dev/sdf appears as /dev/xvdf; Nitro instances expose NVMe devices such as /dev/nvme1n1)
sudo mkfs -t ext4 /dev/xvdf
sudo mkdir /data
sudo mount /dev/xvdf /data

# Add to /etc/fstab for automatic mounting
echo '/dev/xvdf /data ext4 defaults,nofail 0 2' | sudo tee -a /etc/fstab

# Create a snapshot
aws ec2 create-snapshot --volume-id vol-1234567890abcdef0 --description "Daily backup"

# Create a volume from a snapshot
aws ec2 create-volume --snapshot-id snap-1234567890abcdef0 --availability-zone us-east-1a

# Delete a snapshot
aws ec2 delete-snapshot --snapshot-id snap-1234567890abcdef0
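
EBS volumes can also be grown in place without detaching them. A minimal sketch, reusing the volume and device names from above; after resizing, the file system must be expanded separately:

# Grow the volume from 20 GB to 40 GB (takes effect while attached)
aws ec2 modify-volume --volume-id vol-1234567890abcdef0 --size 40

# Expand the ext4 file system to use the new capacity
sudo resize2fs /dev/xvdf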

2.3 EFS File Storage

# Create an EFS file system
aws efs create-file-system --creation-token my-efs

# Create a mount target
aws efs create-mount-target \
  --file-system-id fs-12345678 \
  --subnet-id subnet-12345678 \
  --security-group-ids sg-12345678

# Install the EFS client
sudo yum install -y amazon-efs-utils

# Mount EFS
sudo mkdir /mnt/efs
sudo mount -t efs fs-12345678:/ /mnt/efs

# Add to /etc/fstab
echo 'fs-12345678:/ /mnt/efs efs defaults,_netdev 0 0' | sudo tee -a /etc/fstab
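
If amazon-efs-utils is not available, EFS can also be mounted with a plain NFSv4.1 client. A minimal sketch, assuming the same file-system ID and a us-east-1 mount-target DNS name (adjust the region to match your setup):

# Mount EFS via NFS with the options AWS recommends for the stock NFS client
sudo mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport \
  fs-12345678.efs.us-east-1.amazonaws.com:/ /mnt/efs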

3. Azure Storage Services

3.1 Blob Storage

# Install the Azure CLI
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash

# Log in to Azure
az login

# Create a storage account
az storage account create \
  --name mystorageaccount \
  --resource-group myResourceGroup \
  --location eastus \
  --sku Standard_LRS \
  --kind StorageV2

# Get the storage account key
STORAGE_KEY=$(az storage account keys list \
  --resource-group myResourceGroup \
  --account-name mystorageaccount \
  --query '[0].value' -o tsv)

# Create a container
az storage container create \
  --name mycontainer \
  --account-name mystorageaccount \
  --account-key "$STORAGE_KEY"

# Upload a file
az storage blob upload \
  --container-name mycontainer \
  --name myfile.txt \
  --file myfile.txt \
  --account-name mystorageaccount \
  --account-key "$STORAGE_KEY"

# Download a file
az storage blob download \
  --container-name mycontainer \
  --name myfile.txt \
  --file downloaded.txt \
  --account-name mystorageaccount \
  --account-key "$STORAGE_KEY"

# List blobs
az storage blob list \
  --container-name mycontainer \
  --account-name mystorageaccount \
  --account-key "$STORAGE_KEY"

# Generate a SAS token (read-only, with an expiry date)
az storage blob generate-sas \
  --container-name mycontainer \
  --name myfile.txt \
  --permissions r \
  --expiry 2024-12-31 \
  --account-name mystorageaccount \
  --account-key "$STORAGE_KEY"
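
Like S3, Blob storage supports lifecycle management, configured at the storage-account level. A minimal sketch, reusing the account and resource group from above (the rule name and day thresholds are illustrative):

# Define a lifecycle rule: cool tier after 30 days, delete after 365
cat > mgmt-policy.json << 'EOF'
{
  "rules": [
    {
      "enabled": true,
      "name": "tier-and-expire",
      "type": "Lifecycle",
      "definition": {
        "actions": {
          "baseBlob": {
            "tierToCool": { "daysAfterModificationGreaterThan": 30 },
            "delete": { "daysAfterModificationGreaterThan": 365 }
          }
        },
        "filters": { "blobTypes": ["blockBlob"] }
      }
    }
  ]
}
EOF

az storage account management-policy create \
  --account-name mystorageaccount \
  --resource-group myResourceGroup \
  --policy @mgmt-policy.json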

3.2 Azure Disk

# Create a managed disk
az disk create \
  --resource-group myResourceGroup \
  --name myManagedDisk \
  --size-gb 20 \
  --sku Standard_LRS

# Attach the disk to a VM
az vm disk attach \
  --resource-group myResourceGroup \
  --vm-name myVM \
  --name myManagedDisk

# Initialize the disk on the VM
# (fdisk is interactive: create a single partition, then format it)
sudo fdisk /dev/sdc
sudo mkfs -t ext4 /dev/sdc1
sudo mkdir /data
sudo mount /dev/sdc1 /data

# Create a snapshot
az snapshot create \
  --resource-group myResourceGroup \
  --name mySnapshot \
  --source /subscriptions/{subscription-id}/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/myManagedDisk

# Create a disk from the snapshot
az disk create \
  --resource-group myResourceGroup \
  --name newDisk \
  --source mySnapshot
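
Managed disks can be resized after creation. A minimal sketch, reusing the names from above (depending on VM generation and disk type, the disk may need to be detached or the VM deallocated first):

# Grow the managed disk to 40 GB
az disk update \
  --resource-group myResourceGroup \
  --name myManagedDisk \
  --size-gb 40

# Then, on the VM, grow the partition and the file system
sudo growpart /dev/sdc 1
sudo resize2fs /dev/sdc1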

4. GCP Storage Services

4.1 Cloud Storage

# Install the gcloud CLI
curl https://sdk.cloud.google.com | bash
exec -l $SHELL
gcloud init

# Create a bucket
gsutil mb -p my-project-id gs://my-unique-bucket-name

# Upload a file
gsutil cp file.txt gs://my-unique-bucket-name/

# Download a file
gsutil cp gs://my-unique-bucket-name/file.txt .

# List bucket contents
gsutil ls gs://my-unique-bucket-name/

# Sync a directory (-m enables parallel transfers)
gsutil -m rsync -r ./local-dir gs://my-unique-bucket-name/remote-dir/

# Set an ACL (grant all users read access to one object)
gsutil acl ch -u AllUsers:R gs://my-unique-bucket-name/file.txt

# Set a lifecycle policy
cat > lifecycle.json << 'EOF'
{
  "lifecycle": {
    "rule": [
      {
        "action": {
          "type": "Delete"
        },
        "condition": {
          "age": 30,
          "matchesStorageClass": ["NEARLINE"]
        }
      }
    ]
  }
}
EOF

gsutil lifecycle set lifecycle.json gs://my-unique-bucket-name/

# Change an object's storage class (gsutil changes per-object classes via rewrite)
gsutil rewrite -s NEARLINE gs://my-unique-bucket-name/file.txt
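
Cloud Storage's counterpart to S3 presigned URLs and Azure SAS tokens is the signed URL. A minimal sketch, assuming a service-account key file `key.json` with access to the bucket:

# Generate a signed URL valid for 10 minutes
gsutil signurl -d 10m key.json gs://my-unique-bucket-name/file.txt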

4.2 Persistent Disk

# Create a persistent disk
gcloud compute disks create my-disk \
  --size=20GB \
  --zone=us-central1-a \
  --type=pd-standard

# Attach the disk to an instance
gcloud compute instances attach-disk my-instance \
  --disk=my-disk \
  --zone=us-central1-a

# Format and mount the disk on the instance
sudo mkfs -t ext4 /dev/sdb
sudo mkdir /data
sudo mount /dev/sdb /data

# Add to /etc/fstab
echo '/dev/sdb /data ext4 defaults,nofail 0 2' | sudo tee -a /etc/fstab

# Create a snapshot
gcloud compute disks snapshot my-disk \
  --snapshot-names=my-snapshot \
  --zone=us-central1-a

# Create a disk from the snapshot
gcloud compute disks create new-disk \
  --source-snapshot=my-snapshot \
  --zone=us-central1-a
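
Persistent disks can also be resized online. A minimal sketch, reusing the disk from above:

# Grow the disk to 40 GB (safe while attached; shrinking is not supported)
gcloud compute disks resize my-disk --size=40GB --zone=us-central1-a

# Expand the ext4 file system on the instance
sudo resize2fs /dev/sdb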

5. Storage Backup and Recovery

5.1 Automated Backup Script

#!/bin/bash
# backup.sh

BACKUP_DIR="/backups"
S3_BUCKET="s3://my-backup-bucket"
DATE=$(date +%Y%m%d_%H%M%S)

# Create the backup directory
mkdir -p "$BACKUP_DIR"

# Back up the database (expects DB_PASSWORD in the environment)
mysqldump -u root -p"$DB_PASSWORD" mydb > "$BACKUP_DIR/database_$DATE.sql"

# Back up application files
tar -czf "$BACKUP_DIR/files_$DATE.tar.gz" /var/www/myapp

# Upload to S3
aws s3 cp "$BACKUP_DIR/database_$DATE.sql" "$S3_BUCKET/database/"
aws s3 cp "$BACKUP_DIR/files_$DATE.tar.gz" "$S3_BUCKET/files/"

# Clean up local backups (keep the last 7 days)
find "$BACKUP_DIR" -type f -mtime +7 -delete

# Clean up old S3 backups (keep the last 30 days)
aws s3 ls "$S3_BUCKET/database/" | while read -r line; do
  createDate=$(echo "$line" | awk '{print $1" "$2}')
  createDate=$(date -d "$createDate" +%s)
  olderThan=$(date -d "-30 days" +%s)
  if [[ $createDate -lt $olderThan ]]; then
    fileName=$(echo "$line" | awk '{print $4}')
    aws s3 rm "$S3_BUCKET/database/$fileName"
  fi
done

echo "Backup completed: $DATE"

5.2 Restore Script

#!/bin/bash
# restore.sh

S3_BUCKET="s3://my-backup-bucket"
BACKUP_DATE=$1

if [ -z "$BACKUP_DATE" ]; then
  echo "Usage: $0 <backup_date>"
  echo "Example: $0 20240101_120000"
  exit 1
fi

# Download the backups
aws s3 cp "$S3_BUCKET/database/database_$BACKUP_DATE.sql" /tmp/
aws s3 cp "$S3_BUCKET/files/files_$BACKUP_DATE.tar.gz" /tmp/

# Restore the database (expects DB_PASSWORD in the environment)
mysql -u root -p"$DB_PASSWORD" mydb < "/tmp/database_$BACKUP_DATE.sql"

# Restore application files
tar -xzf "/tmp/files_$BACKUP_DATE.tar.gz" -C /

# Restart services
systemctl restart nginx
systemctl restart php-fpm

echo "Restore completed: $BACKUP_DATE"

6. Storage Performance Optimization

6.1 S3 Performance Optimization

# Tune multipart upload settings to speed up large-file transfers
aws configure set default.s3.max_concurrent_requests 20
aws configure set default.s3.multipart_threshold 64MB
aws configure set default.s3.multipart_chunksize 16MB

# Enable S3 Transfer Acceleration
aws s3api put-bucket-accelerate-configuration \
  --bucket my-unique-bucket-name \
  --accelerate-configuration Status=Enabled

# Upload through the accelerated endpoint (the CLI adds the bucket name itself)
aws s3 cp largefile.txt s3://my-unique-bucket-name/ --endpoint-url https://s3-accelerate.amazonaws.com

6.2 EBS Performance Optimization

# Use an io1 volume for higher provisioned IOPS
aws ec2 create-volume \
  --availability-zone us-east-1a \
  --volume-type io1 \
  --size 100 \
  --iops 5000

# Tune the file system (both options trade crash safety for throughput;
# removing the journal requires the file system to be unmounted)
sudo tune2fs -o journal_data_writeback /dev/xvdf
sudo tune2fs -O ^has_journal /dev/xvdf

# Use RAID 0 striping for higher throughput (no redundancy: losing one volume loses the array)
sudo mdadm --create /dev/md0 --level=0 --raid-devices=4 /dev/xvdf /dev/xvdg /dev/xvdh /dev/xvdi
sudo mkfs -t ext4 /dev/md0
sudo mount /dev/md0 /data
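
An array built this way disappears at reboot unless its configuration is persisted. A minimal sketch (the config path follows Amazon Linux conventions; Debian-based systems use /etc/mdadm/mdadm.conf):

# Record the array layout so it reassembles at boot
sudo mdadm --detail --scan | sudo tee -a /etc/mdadm.conf

# Mount it automatically; nofail keeps boot going if the array is absent
echo '/dev/md0 /data ext4 defaults,nofail 0 2' | sudo tee -a /etc/fstab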

Practical Case Studies

Case 1: Building a Highly Available File Storage System

Scenario

Use EFS to build a highly available shared file storage system that provides shared storage to multiple EC2 instances.

Implementation Steps

  1. Create the EFS file system
# Create the EFS file system
EFS_ID=$(aws efs create-file-system \
  --creation-token my-efs \
  --performance-mode generalPurpose \
  --throughput-mode bursting \
  --query 'FileSystemId' \
  --output text)

echo "EFS ID: $EFS_ID"

# Create mount targets (across multiple availability zones)
SUBNETS=("subnet-12345678" "subnet-87654321")
SECURITY_GROUP="sg-12345678"

for subnet in "${SUBNETS[@]}"; do
  aws efs create-mount-target \
    --file-system-id "$EFS_ID" \
    --subnet-id "$subnet" \
    --security-group-ids "$SECURITY_GROUP"
done

# Wait for the mount targets to become available
aws efs describe-mount-targets --file-system-id "$EFS_ID" --query 'MountTargets[*].[MountTargetId,LifeCycleState]' --output table
  2. Configure automatic mounting
# Install the EFS client
sudo yum install -y amazon-efs-utils

# Create the mount point
sudo mkdir -p /mnt/shared

# Create a systemd mount unit (EOF is unquoted so $EFS_ID expands;
# sudo tee is used because "sudo cat > file" would redirect as the non-root user)
sudo tee /etc/systemd/system/mnt-shared.mount << EOF
[Unit]
Description=Mount EFS file system
After=network-online.target
Wants=network-online.target

[Mount]
What=$EFS_ID:/
Where=/mnt/shared
Type=efs
Options=_netdev,tls,iam

[Install]
WantedBy=multi-user.target
EOF

# Enable and start the mount
sudo systemctl daemon-reload
sudo systemctl enable mnt-shared.mount
sudo systemctl start mnt-shared.mount

# Verify the mount
df -h | grep /mnt/shared
  3. Configure a backup policy
# Create the backup script (AWS Backup stores EFS recovery points in a vault)
sudo tee /usr/local/bin/efs-backup.sh << 'EOF'
#!/bin/bash

EFS_ID=$1

# Create a backup vault for the EFS recovery points
aws backup create-backup-vault \
  --backup-vault-name efs-backup-vault \
  --encryption-key-arn arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012

# Create a backup plan (daily at 02:00 UTC, recovery points retained for 30 days)
PLAN_ID=$(aws backup create-backup-plan \
  --backup-plan '{
    "BackupPlanName": "EFS-Backup-Plan",
    "Rules": [
      {
        "RuleName": "DailyBackup",
        "ScheduleExpression": "cron(0 2 * * ? *)",
        "StartWindowMinutes": 60,
        "CompletionWindowMinutes": 180,
        "Lifecycle": {
          "DeleteAfterDays": 30
        },
        "TargetBackupVault": "efs-backup-vault"
      }
    ]
  }' \
  --query 'BackupPlanId' --output text)

# Assign the EFS file system to the plan so the schedule actually backs it up
aws backup create-backup-selection \
  --backup-plan-id "$PLAN_ID" \
  --backup-selection "{
    \"SelectionName\": \"efs-selection\",
    \"IamRoleArn\": \"arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole\",
    \"Resources\": [\"arn:aws:elasticfilesystem:us-east-1:123456789012:file-system/$EFS_ID\"]
  }"

echo "Backup plan created"
EOF

sudo chmod +x /usr/local/bin/efs-backup.sh

# Run it for the file system created earlier
/usr/local/bin/efs-backup.sh $EFS_ID
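
The plan above only runs on its schedule; an on-demand backup can be started immediately to verify the setup. A minimal sketch, reusing the vault, the placeholder account ID, and the default AWS Backup service role from above:

# Kick off an immediate backup of the EFS file system
aws backup start-backup-job \
  --backup-vault-name efs-backup-vault \
  --resource-arn "arn:aws:elasticfilesystem:us-east-1:123456789012:file-system/$EFS_ID" \
  --iam-role-arn arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole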

Case 2: Implementing Cross-Region Data Synchronization

Scenario

Use S3 Cross-Region Replication for off-site backup and disaster recovery.

Implementation Steps

  1. Configure the source bucket
# Create the source bucket
aws s3 mb s3://source-bucket-primary --region us-east-1

# Enable versioning (required for replication)
aws s3api put-bucket-versioning \
  --bucket source-bucket-primary \
  --versioning-configuration Status=Enabled

# Configure a lifecycle policy: Infrequent Access after 30 days, Glacier after 90
cat > lifecycle-policy.json << 'EOF'
{
  "Rules": [
    {
      "ID": "TieringRule",
      "Status": "Enabled",
      "Filter": {"Prefix": ""},
      "Transitions": [
        {
          "Days": 30,
          "StorageClass": "STANDARD_IA"
        },
        {
          "Days": 90,
          "StorageClass": "GLACIER"
        }
      ]
    }
  ]
}
EOF

aws s3api put-bucket-lifecycle-configuration \
  --bucket source-bucket-primary \
  --lifecycle-configuration file://lifecycle-policy.json
  2. Configure the destination bucket
# Create the destination bucket
aws s3 mb s3://destination-bucket-secondary --region us-west-2

# Enable versioning
aws s3api put-bucket-versioning \
  --bucket destination-bucket-secondary \
  --versioning-configuration Status=Enabled

# Configure a bucket policy (allow the replication role to write replicas)
cat > destination-policy.json << 'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowReplication",
      "Effect": "Allow",
      "Principal": {
        "AWS": "arn:aws:iam::123456789012:role/replication-role"
      },
      "Action": [
        "s3:GetBucketVersioning",
        "s3:PutBucketVersioning",
        "s3:ReplicateObject",
        "s3:ReplicateDelete",
        "s3:ObjectOwnerOverrideToBucketOwner"
      ],
      "Resource": [
        "arn:aws:s3:::destination-bucket-secondary",
        "arn:aws:s3:::destination-bucket-secondary/*"
      ]
    }
  ]
}
EOF

aws s3api put-bucket-policy \
  --bucket destination-bucket-secondary \
  --policy file://destination-policy.json
  3. Configure cross-region replication
# Create an IAM role that S3 can assume for replication
cat > trust-policy.json << 'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "s3.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF

ROLE_ARN=$(aws iam create-role \
  --role-name replication-role \
  --assume-role-policy-document file://trust-policy.json \
  --query 'Role.Arn' \
  --output text)

# Attach the permissions policy (read from the source, replicate into the destination)
cat > permissions-policy.json << 'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetReplicationConfiguration",
        "s3:ListBucket"
      ],
      "Resource": "arn:aws:s3:::source-bucket-primary"
    },
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObjectVersionForReplication",
        "s3:GetObjectVersionAcl",
        "s3:GetObjectVersionTagging"
      ],
      "Resource": "arn:aws:s3:::source-bucket-primary/*"
    },
    {
      "Effect": "Allow",
      "Action": [
        "s3:ReplicateObject",
        "s3:ReplicateDelete",
        "s3:ReplicateTags"
      ],
      "Resource": "arn:aws:s3:::destination-bucket-secondary/*"
    }
  ]
}
EOF

aws iam put-role-policy \
  --role-name replication-role \
  --policy-name replication-permissions \
  --policy-document file://permissions-policy.json

# Configure the replication rule
cat > replication-config.json << 'EOF'
{
  "Role": "arn:aws:iam::123456789012:role/replication-role",
  "Rules": [
    {
      "ID": "ReplicationRule",
      "Priority": 1,
      "Status": "Enabled",
      "Filter": {},
      "Destination": {
        "Bucket": "arn:aws:s3:::destination-bucket-secondary",
        "StorageClass": "STANDARD",
        "Account": "123456789012"
      },
      "DeleteMarkerReplication": {
        "Status": "Enabled"
      }
    }
  ]
}
EOF

aws s3api put-bucket-replication \
  --bucket source-bucket-primary \
  --replication-configuration file://replication-config.json

# Verify the replication configuration
aws s3api get-bucket-replication --bucket source-bucket-primary
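
Replication status can also be checked per object: S3 stamps each source object with a ReplicationStatus of PENDING, COMPLETED, or FAILED. A minimal sketch (`file.txt` is a placeholder key):

# Inspect the replication status of a single object
aws s3api head-object \
  --bucket source-bucket-primary \
  --key file.txt \
  --query ReplicationStatus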
  4. Monitor replication status
# Create a monitoring script
cat > monitor-replication.sh << 'EOF'
#!/bin/bash

SOURCE_BUCKET="source-bucket-primary"
DEST_BUCKET="destination-bucket-secondary"

echo "Checking replication status..."

# List the object keys in the source bucket
SOURCE_OBJECTS=$(aws s3api list-objects-v2 \
  --bucket "$SOURCE_BUCKET" \
  --query 'Contents[*].Key' \
  --output text)

# List the object keys in the destination bucket
DEST_OBJECTS=$(aws s3api list-objects-v2 \
  --bucket "$DEST_BUCKET" \
  --query 'Contents[*].Key' \
  --output text)

# Compare object counts
SOURCE_COUNT=$(echo "$SOURCE_OBJECTS" | wc -w)
DEST_COUNT=$(echo "$DEST_OBJECTS" | wc -w)

echo "Source bucket objects: $SOURCE_COUNT"
echo "Destination bucket objects: $DEST_COUNT"

# Report objects that have not been replicated yet
# (note: keys containing whitespace are not handled by this word-based comparison)
for obj in $SOURCE_OBJECTS; do
  if ! echo "$DEST_OBJECTS" | grep -qF -- "$obj"; then
    echo "Not replicated: $obj"
  fi
done
EOF

chmod +x monitor-replication.sh
sudo mv monitor-replication.sh /usr/local/bin/

# Run it every 6 hours (note: "sudo crontab -" replaces root's existing crontab)
echo "0 */6 * * * /usr/local/bin/monitor-replication.sh >> /var/log/replication-monitor.log 2>&1" | sudo crontab -

Exercises

  1. Basic exercises

    • Create an S3 bucket and upload a file
    • Create an EBS volume and attach it to an EC2 instance
    • Create a shared file system with EFS
  2. Intermediate exercises

    • Configure an S3 lifecycle policy
    • Implement automated backup and restore scripts
    • Configure cross-region replication
  3. Challenge exercises

    • Build a highly available storage architecture
    • Implement storage performance optimizations
    • Design a disaster recovery plan
  4. Questions to consider

    • How do you choose the right storage type?
    • How do you optimize storage costs?
    • How do you ensure data security?

Summary

This episode covered how to integrate cloud storage with Linux systems, including object storage, block storage, file storage, database storage, and storage backup and recovery. After working through it, you should be able to:

  • Understand the types and characteristics of cloud storage
  • Master the use of object storage
  • Become familiar with configuring block storage and file storage
  • Integrate database storage
  • Implement storage backup and recovery

Cloud storage is a key building block of modern application architectures, offering scalable, highly available, low-cost data storage. In real projects, choose the storage type that matches your data access patterns, performance requirements, and cost budget, and establish solid backup, monitoring, and disaster recovery mechanisms to keep data safe and available.
