feat: use opendal for uploading.

Yufan Sheng 2024-07-08 20:48:41 +08:00
parent f128cc81b6
commit 3ac1810435
Signed by: syhily
GPG Key ID: DEB186763C308C31
5 changed files with 690 additions and 2043 deletions

README.md

@@ -1,6 +1,7 @@
# Astro Uploader
An uploader for uploading the Astro generated files through the S3 API.
This uploader is based on [Apache OpenDAL™](https://github.com/apache/opendal). If you run into upload issues, they may be caused by OpenDAL itself; remember to upgrade OpenDAL to the latest version.
## Installation
@@ -56,5 +57,7 @@ type Options = {
accessKey: string;
// The secret access key.
secretAccessKey: string;
// All the settings listed in https://docs.rs/opendal/latest/opendal/services/struct.S3.html#implementations can be passed as extra options.
extraOptions?: Record<string, string>
};
```
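
For reference, a minimal sketch of how these options could be wired into an Astro project is shown below; the `astro-uploader` import path, the endpoint, and the `disable_config_load` extra option are assumptions here, so adjust them to your actual package name and storage provider.

```ts
// astro.config.mjs — a hypothetical sketch, not the project's official example.
import { defineConfig } from 'astro/config';
import { uploader } from 'astro-uploader'; // assumed package name

export default defineConfig({
  integrations: [
    uploader({
      paths: ['assets', 'images'],
      endpoint: 'https://s3.example.com', // any S3-compatible endpoint
      bucket: 'my-bucket',
      accessKey: process.env.S3_ACCESS_KEY ?? '',
      secretAccessKey: process.env.S3_SECRET_KEY ?? '',
      // Any setting supported by the OpenDAL S3 service can be forwarded as-is,
      // e.g. disabling the AWS config/credential auto-loading:
      extraOptions: { disable_config_load: 'true' },
    }),
  ],
});
```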

biome.json Normal file

@@ -0,0 +1,28 @@
{
"$schema": "https://biomejs.dev/schemas/1.8.3/schema.json",
"formatter": {
"enabled": true,
"formatWithErrors": false,
"indentStyle": "space",
"indentWidth": 2,
"lineEnding": "lf",
"lineWidth": 120,
"attributePosition": "auto"
},
"organizeImports": { "enabled": true },
"linter": { "enabled": true, "rules": { "recommended": true } },
"javascript": {
"formatter": {
"jsxQuoteStyle": "double",
"quoteProperties": "asNeeded",
"trailingCommas": "all",
"semicolons": "always",
"arrowParentheses": "always",
"bracketSpacing": true,
"bracketSameLine": false,
"quoteStyle": "single",
"attributePosition": "auto"
}
},
"overrides": [{ "include": ["**/*.astro"] }]
}

package-lock.json generated

File diff suppressed because it is too large.

package.json

@@ -32,24 +32,18 @@
],
"scripts": {
"build": "unbuild",
"format": "biome format src --write",
"stub": "unbuild --stub"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.600.0",
"mime": "^4.0.3",
"rimraf": "^5.0.7"
"mime": "^4.0.4",
"opendal": "^0.47.0",
"rimraf": "^5.0.8"
},
"devDependencies": {
"@types/node": "^20.14.9",
"astro": "^4.11.3",
"@biomejs/biome": "^1.8.3",
"@types/node": "^20.14.10",
"astro": "^4.11.5",
"unbuild": "^2.0.0"
},
"peerDependencies": {
"vite": "^2.9.0 || ^3.0.0-0 || ^4.0.0 || ^5.0.0-0"
},
"peerDependenciesMeta": {
"vite": {
"optional": true
}
}
}

src/index.ts

@@ -1,18 +1,10 @@
import {
DeleteObjectCommand,
HeadBucketCommand,
HeadObjectCommand,
NoSuchBucket,
NotFound,
PutObjectCommand,
S3Client,
} from "@aws-sdk/client-s3";
import type { AstroIntegration, AstroIntegrationLogger } from "astro";
import { z } from "astro/zod";
import mime from "mime";
import fs from "node:fs";
import path from "node:path";
import { rimrafSync } from "rimraf";
import type { AstroIntegration, AstroIntegrationLogger } from 'astro';
import { z } from 'astro/zod';
import mime from 'mime';
import fs from 'node:fs';
import path from 'node:path';
import { Operator } from 'opendal';
import { rimrafSync } from 'rimraf';
type Options = {
// The directories that you want to upload to S3.
@@ -34,6 +26,8 @@ type Options = {
accessKey: string;
// The secret access key.
secretAccessKey: string;
// All the settings listed in https://docs.rs/opendal/latest/opendal/services/struct.S3.html#implementations can be passed as extra options.
extraOptions?: Record<string, string>;
};
const S3Options = z
@@ -41,35 +35,48 @@ const S3Options = z
paths: z.array(z.string()).min(1),
keep: z.boolean().default(false),
override: z.boolean().default(false),
region: z.string().min(1).default("auto"),
region: z.string().min(1).default('auto'),
endpoint: z.string().url().optional(),
bucket: z.string().min(1),
root: z.string().default("/"),
root: z.string().default('/'),
accessKey: z.string().min(1),
secretAccessKey: z.string().min(1),
extraOptions: z.record(z.string(), z.string()).default({}),
})
.strict()
.superRefine((opts, { addIssue }) => {
if (opts.region === "auto" && opts.endpoint === undefined) {
if (opts.region === 'auto' && opts.endpoint === undefined) {
addIssue({
fatal: true,
code: "custom",
message: "either the region or the endpoint should be provided",
code: 'custom',
message: 'either the region or the endpoint should be provided',
});
}
});
const parseOptions = (
opts: Options,
logger: AstroIntegrationLogger,
): z.infer<typeof S3Options> => {
const parseOptions = (opts: Options, logger: AstroIntegrationLogger) => {
try {
return S3Options.parse(opts);
const { paths, keep, override, region, endpoint, bucket, root, accessKey, secretAccessKey, extraOptions } =
S3Options.parse(opts);
// Create the OpenDAL operator options.
// The common configurations are listed at https://docs.rs/opendal/latest/opendal/services/struct.S3.html#configuration
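// Note: because extraOptions is spread first, the explicit keys below override any duplicates passed through it.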
const options: Record<string, string> = {
...extraOptions,
root: root,
bucket: bucket,
region: region,
access_key_id: accessKey,
secret_access_key: secretAccessKey,
};
if (endpoint !== undefined) {
options.endpoint = endpoint;
}
return { options, paths, keep, override };
} catch (err) {
if (err instanceof z.ZodError) {
logger.error(
`Uploader options validation error, there are ${err.issues.length} errors:`,
);
logger.error(`Uploader options validation error, there are ${err.issues.length} errors:`);
for (const issue of err.issues) {
logger.error(issue.message);
}
@@ -80,111 +87,54 @@ const parseOptions = (
};
class Uploader {
private client: S3Client;
private options: z.infer<typeof S3Options>;
private operator: Operator;
private override: boolean;
constructor(client: S3Client, options: z.infer<typeof S3Options>) {
this.client = client;
this.options = options;
}
private key(key: string): string {
return path.posix.join(this.options.root, key);
constructor(operator: Operator, override: boolean) {
this.operator = operator;
this.override = override;
}
private async delete(key: string): Promise<void> {
const deleteCmd = new DeleteObjectCommand({
Bucket: this.options.bucket,
Key: this.key(key),
});
await this.client.send(deleteCmd);
await this.operator.delete(key);
}
async isExist(key: string, size: number): Promise<boolean> {
const headCmd = new HeadObjectCommand({
Bucket: this.options.bucket,
Key: this.key(key),
});
try {
const { ContentLength } = await this.client.send(headCmd);
// The file checksum should be uploaded with file. So we only check content length here.
if (
this.options.override ||
(ContentLength !== undefined && ContentLength !== size)
) {
await this.delete(key);
const exist = await this.operator.isExist(key);
if (exist) {
const { contentLength } = await this.operator.stat(key);
if (contentLength !== null && contentLength !== BigInt(size)) {
if (this.override) {
await this.operator.delete(key);
return false;
}
return true;
} catch (error) {
if (error instanceof NotFound) {
}
}
return false;
}
throw error;
}
}
async write(key: string, body: Buffer) {
const contentType = mime.getType(key);
const putCmd = new PutObjectCommand({
Bucket: this.options.bucket,
Key: this.key(key),
Body: body,
ContentType: contentType === null ? undefined : contentType,
});
await this.client.send(putCmd);
await this.operator.write(key, body, { contentType: contentType === null ? undefined : contentType });
}
}
export const uploader = (opts: Options): AstroIntegration => ({
name: "S3 Uploader",
name: 'S3 Uploader',
hooks: {
"astro:build:done": async ({
dir,
logger,
}: { dir: URL; logger: AstroIntegrationLogger }) => {
const options = parseOptions(opts, logger);
const {
paths,
keep,
region,
endpoint,
bucket,
accessKey,
secretAccessKey,
} = options;
const client = new S3Client({
region: region,
endpoint: endpoint,
credentials: {
accessKeyId: accessKey,
secretAccessKey: secretAccessKey,
},
useGlobalEndpoint: endpoint !== undefined && endpoint !== "",
});
'astro:build:done': async ({ dir, logger }: { dir: URL; logger: AstroIntegrationLogger }) => {
const { options, paths, keep, override } = parseOptions(opts, logger);
const operator = new Operator('s3', options);
logger.info("Try to verify the S3 credentials.");
logger.info('Try to verify the S3 credentials.');
await operator.check();
try {
await client.send(new HeadBucketCommand({ Bucket: bucket }));
} catch (err) {
// If the bucket is not existed.
if (err instanceof NoSuchBucket) {
logger.error(
`The bucket ${bucket} isn't existed on the region: ${region} endpoint: ${endpoint}`,
);
} else {
logger.error(JSON.stringify(err));
}
throw err;
}
logger.info(`Start to upload static files in dir ${paths} to S3 compatible backend.`);
logger.info(
`Start to upload static files in dir ${paths} to S3 compatible backend.`,
);
const uploader = new Uploader(client, options);
const uploader = new Uploader(operator, override);
for (const current of paths) {
await uploadFile(uploader, logger, current, dir.pathname);
if (!keep) {
@@ -192,24 +142,17 @@ export const uploader = (opts: Options): AstroIntegration => ({
}
}
logger.info("Upload all the files successfully.");
logger.info('Upload all the files successfully.');
},
},
});
// Change the Windows path into the Unix path.
const normalizePath = (current: string): string => {
return current.includes(path.win32.sep)
? current.split(path.win32.sep).join(path.posix.sep)
: current;
return current.includes(path.win32.sep) ? current.split(path.win32.sep).join(path.posix.sep) : current;
};
const uploadFile = async (
uploader: Uploader,
logger: AstroIntegrationLogger,
current: string,
root: string,
) => {
const uploadFile = async (uploader: Uploader, logger: AstroIntegrationLogger, current: string, root: string) => {
const filePath = path.join(root, current);
const fileStats = fs.statSync(filePath);
const isFile = !fileStats.isDirectory();
@@ -229,7 +172,7 @@ const uploadFile = async (
} else {
// Recursively upload files.
for (const next of fs.readdirSync(filePath)) {
if (next.startsWith(".")) {
if (next.startsWith('.')) {
continue;
}
await uploadFile(uploader, logger, path.join(current, next), root);