feat: change S3 node client to opendal. 😎
parent 6edaea9245
commit c9c1a3fc78
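For context, a minimal sketch of how the reworked integration might be configured after this change. The import path, bucket name, endpoint, and environment variable names below are placeholders rather than part of this commit; the option shape follows the new S3Options schema in the diff, where region now defaults to 'auto' and an endpoint is required whenever region stays 'auto'.

// astro.config.ts — hypothetical wiring; the uploader import path is an assumption.
import { defineConfig } from 'astro/config';
import { uploader } from './plugins/uploader';

export default defineConfig({
  integrations: [
    uploader({
      // Build output directories to upload and then remove from dist.
      paths: ['images', 'assets'],
      bucket: 'my-bucket',
      accessKey: process.env.S3_ACCESS_KEY_ID ?? '',
      secretAccessKey: process.env.S3_SECRET_ACCESS_KEY ?? '',
      // region defaults to 'auto'; the schema then insists on an explicit endpoint.
      endpoint: 'https://s3.example.com',
      // extraOptions (not shown) is forwarded verbatim to opendal's S3 service configuration.
    }),
  ],
});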
.vscode/settings.json (vendored), 1 line changed
@@ -91,6 +91,7 @@
   "nofollow",
   "nopd",
   "noto",
+  "opendal",
   "oppo",
   "opposans",
   "pandiyan",
Dockerfile

@@ -1,4 +1,4 @@
-FROM node:lts-alpine AS base
+FROM node:lts AS base
 WORKDIR /app
 COPY package.json package-lock.json ./
 
package-lock.json (generated), 1868 lines changed

File diff suppressed because it is too large.
package.json

@@ -56,7 +56,6 @@
   },
   "devDependencies": {
     "@astrojs/check": "^0.7.0",
-    "@aws-sdk/client-s3": "^3.600.0",
     "@biomejs/biome": "^1.8.2",
     "@napi-rs/canvas": "^0.1.53",
     "@types/lodash": "^4.17.5",
@@ -67,6 +66,7 @@
     "@types/unist": "^3.0.2",
     "aplayer": "^1.10.1",
     "bootstrap": "^5.3.3",
+    "opendal": "^0.46.1",
     "prettier": "^3.3.2",
     "prettier-plugin-astro": "^0.14.0",
     "prettier-plugin-astro-organize-imports": "^0.4.8",
S3 uploader integration:

@@ -1,60 +1,78 @@
-import { HeadObjectCommand, NotFound, PutObjectCommand, S3Client } from '@aws-sdk/client-s3';
-import type { Logger } from '@smithy/types';
 import type { AstroIntegration, AstroIntegrationLogger } from 'astro';
 import { z } from 'astro/zod';
 import fs from 'node:fs';
 import path from 'node:path';
+import { Operator } from 'opendal';
 import { rimrafSync } from 'rimraf';
 
-const S3Options = z.object({
+const S3Options = z
+  .object({
+    // The directories that you want to upload to S3.
     paths: z.array(z.string()).min(1),
-    region: z.string().min(1).default('us-east-1'),
+    // The S3 region, set it if you use AWS S3 service.
+    region: z.string().min(1).default('auto'),
+    // The endpoint, set it if you use 3rd-party S3 service.
     endpoint: z.string().url().optional(),
+    // The name of the bucket.
     bucket: z.string().min(1),
+    // The root directory you want to upload files.
+    root: z.string().default('/'),
+    // The access key id.
     accessKey: z.string().min(1),
+    // The secret access key.
     secretAccessKey: z.string().min(1),
-});
+    // The extra options provided by opendal.
+    // All the methods in https://docs.rs/opendal/latest/opendal/services/struct.S3.html#implementations can be treated as an option.
+    extraOptions: z.record(z.string(), z.string()).default({}),
+  })
+  .strict()
+  .refine((opts) => (opts.region === 'auto' ? opts.endpoint !== undefined : true));
+
+const parseOptions = (
+  opts: z.input<typeof S3Options>,
+  logger: AstroIntegrationLogger,
+): { options: Record<string, string>; paths: string[] } => {
+  try {
+    const { paths, bucket, root, accessKey, secretAccessKey, region, endpoint, extraOptions } = S3Options.parse(opts);
+
+    // Create opendal operator.
+    // The common configurations are listed here https://docs.rs/opendal/latest/opendal/services/struct.S3.html#configuration
+    const options: Record<string, string> = {
+      ...extraOptions,
+      root: root,
+      bucket: bucket,
+      region: region,
+      access_key_id: accessKey,
+      secret_access_key: secretAccessKey,
+    };
+    if (endpoint !== undefined) {
+      options.endpoint = endpoint;
+    }
+
+    return { options, paths };
+  } catch (err) {
+    if (err instanceof z.ZodError) {
+      logger.error(`Uploader options validation error, there are ${err.issues.length} errors:`);
+      for (const issue of err.issues) {
+        logger.error(issue.message);
+      }
+    }
+
+    throw err;
+  }
+};
 
 export const uploader = (opts: z.input<typeof S3Options>): AstroIntegration => ({
   name: 'S3 Uploader',
   hooks: {
     'astro:build:done': async ({ dir, logger }: { dir: URL; logger: AstroIntegrationLogger }) => {
-      const { paths, bucket, accessKey, secretAccessKey, region, endpoint } = S3Options.parse(opts);
-
-      // Create S3 Client.
-      const clientLogger = (): Logger => {
-        const s3Logger = logger.fork('S3 Client');
-        return {
-          // biome-ignore lint/suspicious/noExplicitAny: It's define by external types.
-          debug: (...content: any[]): void => {
-            s3Logger.debug(content.join(' '));
-          },
-          // biome-ignore lint/suspicious/noExplicitAny: It's define by external types.
-          info: (...content: any[]): void => {
-            s3Logger.info(content.join(' '));
-          },
-          // biome-ignore lint/suspicious/noExplicitAny: It's define by external types.
-          warn: (...content: any[]): void => {
-            s3Logger.warn(content.join(' '));
-          },
-          // biome-ignore lint/suspicious/noExplicitAny: It's define by external types.
-          error: (...content: any[]): void => {
-            s3Logger.error(content.join(' '));
-          },
-        };
-      };
-      const client = new S3Client({
-        region: region,
-        endpoint: endpoint,
-        logger: clientLogger(),
-        credentials: { accessKeyId: accessKey, secretAccessKey: secretAccessKey },
-        useGlobalEndpoint: endpoint === '' || endpoint === '',
-      });
-
+      const { options, paths } = parseOptions(opts, logger);
+      const operator = new Operator('s3', options);
       logger.info(`Start to upload static files in dir ${paths} to S3 compatible backend.`);
 
       for (const current of paths) {
-        await uploadFile(client, logger, bucket, current, dir.pathname);
+        await uploadFile(operator, logger, current, dir.pathname);
         rimrafSync(path.join(dir.pathname, current));
       }
 
@@ -68,32 +86,26 @@ const normalizePath = (current: string): string => {
   return current.includes(path.win32.sep) ? current.split(path.win32.sep).join(path.posix.sep) : current;
 };
 
-const uploadFile = async (
-  client: S3Client,
-  logger: AstroIntegrationLogger,
-  bucket: string,
-  current: string,
-  root: string,
-) => {
+const uploadFile = async (operator: Operator, logger: AstroIntegrationLogger, current: string, root: string) => {
   const filePath = path.join(root, current);
   const isFile = !fs.statSync(filePath).isDirectory();
+  const uploadAction = async (key: string) => {
+    logger.info(`Start to upload file: ${key}`);
+    const body = fs.readFileSync(filePath);
+    await operator.write(key, body);
+  };
 
   if (isFile) {
     const key = normalizePath(current);
-    const headCmd = new HeadObjectCommand({ Bucket: bucket, Key: key });
     try {
-      await client.send(headCmd);
-      logger.info(`${key} exists on backend, skip.`);
-    } catch (error) {
-      if (error instanceof NotFound) {
-        logger.info(`Start to upload file: ${key}`);
-
-        const body = fs.readFileSync(filePath);
-        const putCmd = new PutObjectCommand({ Bucket: bucket, Key: key, Body: body });
-        await client.send(putCmd);
-      } else {
-        throw error;
-      }
+      const meta = await operator.stat(key);
+      if (meta.isFile()) {
+        logger.info(`${key} exists on backend, skip.`);
+      } else {
+        await uploadAction(key);
+      }
+    } catch (error) {
+      await uploadAction(key);
     }
     return;
   }
@@ -103,6 +115,6 @@ const uploadFile = async (
     if (next.startsWith('.')) {
       continue;
     }
-    await uploadFile(client, logger, bucket, path.join(current, next), root);
+    await uploadFile(operator, logger, path.join(current, next), root);
   }
 };
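For reference, a minimal standalone sketch of the opendal Node.js calls the rewritten uploadFile relies on: build an Operator for the 's3' service from the same option keys that parseOptions assembles, stat() a key to see whether it already exists as a file, and write() the local bytes otherwise. The bucket, endpoint, credentials, object key, and local path are placeholders.

import fs from 'node:fs';
import { Operator } from 'opendal';

const uploadIfMissing = async (operator: Operator, key: string, localPath: string) => {
  try {
    // stat() rejects when the key does not exist on the backend.
    const meta = await operator.stat(key);
    if (meta.isFile()) {
      console.log(`${key} exists on backend, skip.`);
      return;
    }
  } catch {
    // Key not found: fall through and upload below.
  }
  await operator.write(key, fs.readFileSync(localPath));
};

const operator = new Operator('s3', {
  // Same keys that parseOptions() builds; all values here are placeholders.
  root: '/',
  bucket: 'my-bucket',
  region: 'auto',
  endpoint: 'https://s3.example.com',
  access_key_id: 'placeholder',
  secret_access_key: 'placeholder',
});

await uploadIfMissing(operator, 'images/example.png', 'dist/images/example.png');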