feat: use opendal for uploading.
This commit is contained in: parent f128cc81b6 · commit 3ac1810435
@@ -1,6 +1,7 @@
 # Astro Uploader
 
 An uploader for uploading the Astro generated files through the S3 API.
+This uploader is based on [Apache OpenDAL™](https://github.com/apache/opendal). If you run into upload issues, they may originate in OpenDAL itself, so remember to upgrade OpenDAL to the latest version.
 
 ## Installation
@@ -56,5 +57,7 @@ type Options = {
   accessKey: string;
   // The secret access key.
   secretAccessKey: string;
+  // All the methods in https://docs.rs/opendal/latest/opendal/services/struct.S3.html#implementations can be treated as an extra option.
+  extraOptions?: Record<string, string>;
 };
 ```
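For orientation, here is a minimal sketch of how the new `extraOptions` field could be used from an Astro config. The import path and all option values are illustrative assumptions (the package name is not shown in this diff), and the extra keys are forwarded verbatim into OpenDAL's S3 service configuration:

```ts
// astro.config.ts — illustrative sketch; import path and values are placeholders.
import { defineConfig } from 'astro/config';
import { uploader } from 'astro-uploader'; // assumed package name

export default defineConfig({
  integrations: [
    uploader({
      paths: ['images'],
      endpoint: 'https://s3.example.com', // placeholder endpoint
      bucket: 'my-bucket',                // placeholder bucket
      accessKey: 'xxx',                   // placeholder credential
      secretAccessKey: 'xxx',             // placeholder credential
      // Passed through as-is to OpenDAL's S3 service configuration.
      extraOptions: { enable_virtual_host_style: 'true' },
    }),
  ],
});
```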
biome.json (new file, +28 lines)
@@ -0,0 +1,28 @@
+{
+  "$schema": "https://biomejs.dev/schemas/1.8.3/schema.json",
+  "formatter": {
+    "enabled": true,
+    "formatWithErrors": false,
+    "indentStyle": "space",
+    "indentWidth": 2,
+    "lineEnding": "lf",
+    "lineWidth": 120,
+    "attributePosition": "auto"
+  },
+  "organizeImports": { "enabled": true },
+  "linter": { "enabled": true, "rules": { "recommended": true } },
+  "javascript": {
+    "formatter": {
+      "jsxQuoteStyle": "double",
+      "quoteProperties": "asNeeded",
+      "trailingCommas": "all",
+      "semicolons": "always",
+      "arrowParentheses": "always",
+      "bracketSpacing": true,
+      "bracketSameLine": false,
+      "quoteStyle": "single",
+      "attributePosition": "auto"
+    }
+  },
+  "overrides": [{ "include": ["**/*.astro"] }]
+}
package-lock.json (generated, 2325 lines changed)
File diff suppressed because it is too large.
package.json (20 lines changed)
@@ -32,24 +32,18 @@
   ],
   "scripts": {
     "build": "unbuild",
+    "format": "biome format src --write",
     "stub": "unbuild --stub"
   },
   "dependencies": {
-    "@aws-sdk/client-s3": "^3.600.0",
-    "mime": "^4.0.3",
-    "rimraf": "^5.0.7"
+    "mime": "^4.0.4",
+    "opendal": "^0.47.0",
+    "rimraf": "^5.0.8"
   },
   "devDependencies": {
-    "@types/node": "^20.14.9",
-    "astro": "^4.11.3",
+    "@biomejs/biome": "^1.8.3",
+    "@types/node": "^20.14.10",
+    "astro": "^4.11.5",
     "unbuild": "^2.0.0"
-  },
-  "peerDependencies": {
-    "vite": "^2.9.0 || ^3.0.0-0 || ^4.0.0 || ^5.0.0-0"
-  },
-  "peerDependenciesMeta": {
-    "vite": {
-      "optional": true
-    }
-  }
+  }
 }
src/index.ts (185 lines changed)
@@ -1,18 +1,10 @@
-import {
-  DeleteObjectCommand,
-  HeadBucketCommand,
-  HeadObjectCommand,
-  NoSuchBucket,
-  NotFound,
-  PutObjectCommand,
-  S3Client,
-} from "@aws-sdk/client-s3";
-import type { AstroIntegration, AstroIntegrationLogger } from "astro";
-import { z } from "astro/zod";
-import mime from "mime";
-import fs from "node:fs";
-import path from "node:path";
-import { rimrafSync } from "rimraf";
+import type { AstroIntegration, AstroIntegrationLogger } from 'astro';
+import { z } from 'astro/zod';
+import mime from 'mime';
+import fs from 'node:fs';
+import path from 'node:path';
+import { Operator } from 'opendal';
+import { rimrafSync } from 'rimraf';
 
 type Options = {
   // The directories that you want to upload to S3.
@@ -34,6 +26,8 @@ type Options = {
   accessKey: string;
   // The secret access key.
   secretAccessKey: string;
+  // All the methods in https://docs.rs/opendal/latest/opendal/services/struct.S3.html#implementations can be treated as an extra option.
+  extraOptions?: Record<string, string>;
 };
 
 const S3Options = z
@@ -41,35 +35,48 @@ const S3Options = z
     paths: z.array(z.string()).min(1),
     keep: z.boolean().default(false),
     override: z.boolean().default(false),
-    region: z.string().min(1).default("auto"),
+    region: z.string().min(1).default('auto'),
     endpoint: z.string().url().optional(),
     bucket: z.string().min(1),
-    root: z.string().default("/"),
+    root: z.string().default('/'),
     accessKey: z.string().min(1),
     secretAccessKey: z.string().min(1),
+    extraOptions: z.record(z.string(), z.string()).default({}),
   })
   .strict()
   .superRefine((opts, { addIssue }) => {
-    if (opts.region === "auto" && opts.endpoint === undefined) {
+    if (opts.region === 'auto' && opts.endpoint === undefined) {
       addIssue({
         fatal: true,
-        code: "custom",
-        message: "either the region or the endpoint should be provided",
+        code: 'custom',
+        message: 'either the region or the endpoint should be provided',
       });
     }
   });
 
-const parseOptions = (
-  opts: Options,
-  logger: AstroIntegrationLogger,
-): z.infer<typeof S3Options> => {
+const parseOptions = (opts: Options, logger: AstroIntegrationLogger) => {
   try {
-    return S3Options.parse(opts);
+    const { paths, keep, override, region, endpoint, bucket, root, accessKey, secretAccessKey, extraOptions } =
+      S3Options.parse(opts);
+
+    // Create opendal operator options.
+    // The common configurations are listed here https://docs.rs/opendal/latest/opendal/services/struct.S3.html#configuration
+    const options: Record<string, string> = {
+      ...extraOptions,
+      root: root,
+      bucket: bucket,
+      region: region,
+      access_key_id: accessKey,
+      secret_access_key: secretAccessKey,
+    };
+    if (endpoint !== undefined) {
+      options.endpoint = endpoint;
+    }
+
+    return { options, paths, keep, override };
   } catch (err) {
     if (err instanceof z.ZodError) {
-      logger.error(
-        `Uploader options validation error, there are ${err.issues.length} errors:`,
-      );
+      logger.error(`Uploader options validation error, there are ${err.issues.length} errors:`);
       for (const issue of err.issues) {
         logger.error(issue.message);
       }
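As a minimal sketch of what `parseOptions` now produces and how it is consumed (all values below are placeholders; the snake_case keys follow OpenDAL's S3 configuration per the docs.rs link in the comment above, and `check()` is the same call the build hook uses):

```ts
// Sketch only: assumes opendal ^0.47 and an ESM module (for top-level await).
import { Operator } from 'opendal';

const options: Record<string, string> = {
  root: '/',
  bucket: 'my-bucket',                // placeholder
  region: 'auto',
  access_key_id: 'xxx',               // placeholder
  secret_access_key: 'xxx',           // placeholder
  endpoint: 'https://s3.example.com', // placeholder; only set when provided
};

const operator = new Operator('s3', options);
await operator.check(); // fails fast on bad credentials, mirroring the hook below
```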
@@ -80,111 +87,54 @@ const parseOptions = (
 };
 
 class Uploader {
-  private client: S3Client;
-  private options: z.infer<typeof S3Options>;
+  private operator: Operator;
+  private override: boolean;
 
-  constructor(client: S3Client, options: z.infer<typeof S3Options>) {
-    this.client = client;
-    this.options = options;
-  }
-
-  private key(key: string): string {
-    return path.posix.join(this.options.root, key);
+  constructor(operator: Operator, override: boolean) {
+    this.operator = operator;
+    this.override = override;
   }
 
   private async delete(key: string): Promise<void> {
-    const deleteCmd = new DeleteObjectCommand({
-      Bucket: this.options.bucket,
-      Key: this.key(key),
-    });
-    await this.client.send(deleteCmd);
+    await this.operator.delete(key);
   }
 
   async isExist(key: string, size: number): Promise<boolean> {
-    const headCmd = new HeadObjectCommand({
-      Bucket: this.options.bucket,
-      Key: this.key(key),
-    });
-    try {
-      const { ContentLength } = await this.client.send(headCmd);
-      // The file checksum should be uploaded with file. So we only check content length here.
-      if (
-        this.options.override ||
-        (ContentLength !== undefined && ContentLength !== size)
-      ) {
-        await this.delete(key);
-        return false;
-      }
-      return true;
-    } catch (error) {
-      if (error instanceof NotFound) {
-        return false;
-      }
-      throw error;
-    }
+    const exist = await this.operator.isExist(key);
+    if (exist) {
+      const { contentLength } = await this.operator.stat(key);
+      if (contentLength !== null && contentLength !== BigInt(size)) {
+        if (this.override) {
+          await this.operator.delete(key);
+          return false;
+        }
+
+        return true;
+      }
+    }
+
+    return false;
   }
 
   async write(key: string, body: Buffer) {
     const contentType = mime.getType(key);
-    const putCmd = new PutObjectCommand({
-      Bucket: this.options.bucket,
-      Key: this.key(key),
-      Body: body,
-      ContentType: contentType === null ? undefined : contentType,
-    });
-
-    await this.client.send(putCmd);
+    await this.operator.write(key, body, { contentType: contentType === null ? undefined : contentType });
   }
 }
 
 export const uploader = (opts: Options): AstroIntegration => ({
-  name: "S3 Uploader",
+  name: 'S3 Uploader',
   hooks: {
-    "astro:build:done": async ({
-      dir,
-      logger,
-    }: { dir: URL; logger: AstroIntegrationLogger }) => {
-      const options = parseOptions(opts, logger);
-      const {
-        paths,
-        keep,
-        region,
-        endpoint,
-        bucket,
-        accessKey,
-        secretAccessKey,
-      } = options;
-      const client = new S3Client({
-        region: region,
-        endpoint: endpoint,
-        credentials: {
-          accessKeyId: accessKey,
-          secretAccessKey: secretAccessKey,
-        },
-        useGlobalEndpoint: endpoint !== undefined && endpoint !== "",
-      });
+    'astro:build:done': async ({ dir, logger }: { dir: URL; logger: AstroIntegrationLogger }) => {
+      const { options, paths, keep, override } = parseOptions(opts, logger);
+      const operator = new Operator('s3', options);
 
-      logger.info("Try to verify the S3 credentials.");
+      logger.info('Try to verify the S3 credentials.');
+      await operator.check();
 
-      try {
-        await client.send(new HeadBucketCommand({ Bucket: bucket }));
-      } catch (err) {
-        // If the bucket is not existed.
-        if (err instanceof NoSuchBucket) {
-          logger.error(
-            `The bucket ${bucket} isn't existed on the region: ${region} endpoint: ${endpoint}`,
-          );
-        } else {
-          logger.error(JSON.stringify(err));
-        }
-        throw err;
-      }
-
-      logger.info(
-        `Start to upload static files in dir ${paths} to S3 compatible backend.`,
-      );
-
-      const uploader = new Uploader(client, options);
+      logger.info(`Start to upload static files in dir ${paths} to S3 compatible backend.`);
+
+      const uploader = new Uploader(operator, override);
       for (const current of paths) {
         await uploadFile(uploader, logger, current, dir.pathname);
         if (!keep) {
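To make the new control flow concrete, here is a hypothetical drive of the `Uploader` class above. The object key and file path are invented, `operator` and `override` come from the sketch earlier, and the skip-or-upload pattern is an assumption about how the (elided) `uploadFile` helper calls these methods:

```ts
// Hypothetical usage of the Uploader from this diff; names/paths are placeholders.
import fs from 'node:fs';

const uploader = new Uploader(operator, override);
const key = 'index.html';                        // placeholder object key
const body = fs.readFileSync('dist/index.html'); // placeholder local file

// When isExist() returns false, any stale remote copy has already been
// deleted, so the file should be (re-)uploaded.
if (!(await uploader.isExist(key, body.length))) {
  await uploader.write(key, body); // content type inferred from the key via mime
}
```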
@@ -192,24 +142,17 @@ export const uploader = (opts: Options): AstroIntegration => ({
         }
       }
 
-      logger.info("Upload all the files successfully.");
+      logger.info('Upload all the files successfully.');
     },
   },
 });
 
 // Change the windows path into the unix path.
 const normalizePath = (current: string): string => {
-  return current.includes(path.win32.sep)
-    ? current.split(path.win32.sep).join(path.posix.sep)
-    : current;
+  return current.includes(path.win32.sep) ? current.split(path.win32.sep).join(path.posix.sep) : current;
};
 
-const uploadFile = async (
-  uploader: Uploader,
-  logger: AstroIntegrationLogger,
-  current: string,
-  root: string,
-) => {
+const uploadFile = async (uploader: Uploader, logger: AstroIntegrationLogger, current: string, root: string) => {
   const filePath = path.join(root, current);
   const fileStats = fs.statSync(filePath);
   const isFile = !fileStats.isDirectory();
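As a quick illustration of `normalizePath` (inputs invented): Windows-style separators become POSIX ones so that S3 object keys always use forward slashes, while POSIX input passes through unchanged.

```ts
// Invented example inputs for the normalizePath helper above.
normalizePath('assets\\images\\logo.png'); // => 'assets/images/logo.png'
normalizePath('assets/images/logo.png');   // => 'assets/images/logo.png' (unchanged)
```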
@@ -229,7 +172,7 @@ const uploadFile = async (
   } else {
     // Recursively upload files.
     for (const next of fs.readdirSync(filePath)) {
-      if (next.startsWith(".")) {
+      if (next.startsWith('.')) {
         continue;
       }
       await uploadFile(uploader, logger, path.join(current, next), root);