"""Chunk-upload backends: an abstract interface plus local-disk and S3 multipart implementations."""
import os
from abc import ABC, abstractmethod

import boto3
from botocore.client import Config
class ChunkUploadBackend(ABC):
    """Abstract interface for streaming a file to a destination chunk by chunk.

    Lifecycle: ``start()`` once per file, ``upload_chunk()`` any number of
    times, then either ``finish()`` (success) or ``abort()`` (cancel).
    """

    @abstractmethod
    def start(self, filename: str) -> None:
        """Begin an upload for a new file named *filename*."""
        ...

    @abstractmethod
    def upload_chunk(self, chunk: bytes) -> None:
        """Submit the next chunk of the current file."""
        ...

    @abstractmethod
    def finish(self) -> str:
        """Finalize the upload and return the stored file's location."""
        ...

    @abstractmethod
    def abort(self) -> None:
        """Cancel the upload in progress."""
        ...
|
|
class DiskChunkUploadBackend(ChunkUploadBackend):
    """Backend that streams chunks into a file under a local directory.

    The directory *base_path* is created on construction if it does not
    already exist.
    """

    def __init__(self, base_path: str):
        self.base_path = base_path
        os.makedirs(self.base_path, exist_ok=True)
        # Currently open destination file object, or None when idle.
        self._file = None

    def start(self, filename: str) -> None:
        """Open (and truncate) the destination file for binary writing."""
        self._file = open(os.path.join(self.base_path, filename), "wb")

    def upload_chunk(self, chunk: bytes) -> None:
        """Append *chunk* to the open file.

        Raises:
            RuntimeError: if called before start().
        """
        if self._file is None:
            raise RuntimeError("upload_chunk() called before start()")
        self._file.write(chunk)

    def finish(self) -> str:
        """Close the file and return its full path (None if nothing was started)."""
        if self._file is None:
            return None
        # BUGFIX: file.name already includes base_path (it was opened via
        # os.path.join in start()); joining base_path again produced a
        # doubled prefix whenever base_path was a relative path.
        path = self._file.name
        self._file.close()
        self._file = None  # reset so the backend can be reused
        return path

    def abort(self) -> None:
        """Close and forget the open file, if any."""
        if self._file:
            self._file.close()
        self._file = None
|
|
|
|
class S3ChunkUploadBackend(ChunkUploadBackend):
    """Backend that streams chunks to S3 using a multipart upload.

    Chunks accumulate in an in-memory buffer and are flushed as multipart
    parts once the buffer reaches ``multipart_threshold`` bytes (S3 requires
    every part except the last to be at least 5 MiB).

    Connection settings are read from environment variables (loaded from
    ``.env``): S3_ACCESS_KEY, S3_SECRET_KEY, S3_ENDPOINT_URL, S3_REGION_NAME,
    S3_SIGNATURE_VERSION, S3_BUCKET_NAME.
    """

    def __init__(self, key_prefix: str = ""):
        # Local import keeps python-dotenv optional for users of other backends.
        from dotenv import load_dotenv

        load_dotenv(".env")

        self.s3 = boto3.client(
            service_name="s3",
            aws_access_key_id=os.getenv("S3_ACCESS_KEY"),
            aws_secret_access_key=os.getenv("S3_SECRET_KEY"),
            endpoint_url=os.getenv("S3_ENDPOINT_URL"),
            region_name=os.getenv("S3_REGION_NAME"),
            use_ssl=True,
            config=Config(signature_version=os.getenv("S3_SIGNATURE_VERSION")),
        )
        self.bucket = os.getenv("S3_BUCKET_NAME")
        self.key_prefix = key_prefix
        self.upload_id = None          # active multipart UploadId, or None
        self.parts = []                # [{"PartNumber": int, "ETag": str}, ...]
        self.part_number = 1           # next part number (S3 parts are 1-based)
        self.buffer = bytearray()      # bytes not yet flushed as a part
        self.multipart_threshold = 5 * 1024 * 1024  # 5 MiB: S3 minimum part size
        self.key = None                # object key of the current upload

    def start(self, filename: str) -> None:
        """Begin a multipart upload for *filename* under the key prefix."""
        # BUGFIX: the filename parameter was ignored and a literal placeholder
        # was baked into the key; the key must be derived from *filename*.
        self.key = f"{self.key_prefix}{filename}"
        response = self.s3.create_multipart_upload(Bucket=self.bucket, Key=self.key)
        self.upload_id = response["UploadId"]

    def upload_chunk(self, chunk: bytes) -> None:
        """Buffer *chunk*, flushing a part once the threshold is reached."""
        self.buffer.extend(chunk)
        if len(self.buffer) >= self.multipart_threshold:
            self._flush_part()

    def _flush_part(self):
        """Upload the buffered bytes as the next multipart part."""
        response = self.s3.upload_part(
            Bucket=self.bucket,
            Key=self.key,
            PartNumber=self.part_number,
            UploadId=self.upload_id,
            Body=bytes(self.buffer),
        )
        self.parts.append({"PartNumber": self.part_number, "ETag": response["ETag"]})
        self.part_number += 1
        self.buffer.clear()

    def finish(self) -> str:
        """Flush the tail of the buffer, complete the upload, return its URL."""
        if self.buffer:
            self._flush_part()
        # Reset the counter for the next upload.
        self.part_number = 1
        response = self.s3.complete_multipart_upload(
            Bucket=self.bucket,
            Key=self.key,
            UploadId=self.upload_id,
            MultipartUpload={"Parts": self.parts},
        )
        # Reset per-upload state so the backend can be reused.
        self.parts = []
        self.upload_id = None
        return response["Location"]

    def abort(self) -> None:
        """Abort the in-progress multipart upload and discard buffered data."""
        if self.upload_id:
            self.s3.abort_multipart_upload(
                Bucket=self.bucket, Key=self.key, UploadId=self.upload_id
            )
            self.upload_id = None  # BUGFIX: was left set after aborting
        # BUGFIX: the buffer was not cleared on abort, so a later start()
        # would prepend stale bytes to the next upload.
        self.buffer.clear()
        self.part_number = 1
        self.parts = []