feat: add a switch for uploading and make the upload paths configurable
parent 509c7a69d6
commit 523e4febf5
README.md | 24
@@ -50,9 +50,25 @@ export default defineConfig({
 ## Options
 
 ```ts
+type Path = {
+  // The directory in the astro static build that you want to upload to S3.
+  path: string;
+  // Whether to upload the files that locates in the inner directory.
+  recursive?: boolean;
+  // Whether to keep the original files after uploading.
+  keep?: boolean;
+  // Whether to override the existing files on S3.
+  // It will be override only when the content-length don't match the file size by default.
+  override?: boolean;
+};
+
 type Options = {
-  // The directories that you want to upload to S3.
-  paths: string[];
+  // Enable the uploader
+  enable?: boolean;
+  // The directory in the astro static build that you want to upload to S3.
+  paths: Array<string | Path>;
   // Whether to upload the files that locates in the inner directory.
   recursive?: boolean;
   // Whether to keep the original files after uploading.
   keep?: boolean;
   // Whether to override the existing files on S3.
@@ -64,13 +80,13 @@ type Options = {
   endpoint?: string;
   // The name of the bucket.
   bucket: string;
-  // The root directory you want to upload files.
+  // The root directory in S3 service that you want to upload files.
   root?: string;
   // The access key id.
   accessKey: string;
   // The secret access key.
   secretAccessKey: string;
   // All the methods in https://docs.rs/opendal/latest/opendal/services/struct.S3.html#implementations can be treated as an extra option.
-  extraOptions?: Record<string, string>
+  extraOptions?: Record<string, string>;
 };
 ```
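For reference, a minimal `astro.config.ts` sketch exercising the new options; the endpoint, bucket, paths, and environment variable names here are hypothetical:

```ts
import { defineConfig } from 'astro/config';
import { uploader } from 'astro-uploader';

export default defineConfig({
  integrations: [
    uploader({
      // The new switch: set to false to skip uploading entirely.
      enable: true,
      // Plain strings inherit the top-level recursive/keep/override values;
      // a Path object overrides them for that directory only.
      paths: ['assets', { path: 'images', recursive: false, keep: true, override: false }],
      endpoint: 'https://s3.us-east-1.amazonaws.com',
      bucket: 'my-bucket',
      accessKey: process.env.S3_ACCESS_KEY ?? '',
      secretAccessKey: process.env.S3_SECRET_KEY ?? '',
    }),
  ],
});
```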
package-lock.json (generated) | 3143
File diff suppressed because it is too large
package.json | 10
@@ -1,6 +1,6 @@
 {
   "name": "astro-uploader",
-  "version": "1.1.3",
+  "version": "1.2.0",
   "description": "A uploader for uploading the Astro generated files through the S3 API.",
   "keywords": ["Astro", "S3", "withastro", "opendal"],
   "bugs": {
@@ -31,13 +31,13 @@
   },
   "dependencies": {
     "mime": "^4.0.4",
-    "opendal": "^0.47.1",
+    "opendal": "^0.47.7",
     "rimraf": "^6.0.1"
   },
   "devDependencies": {
-    "@biomejs/biome": "^1.8.3",
-    "@types/node": "^22.1.0",
-    "astro": "^4.13.1",
+    "@biomejs/biome": "^1.9.4",
+    "@types/node": "^22.10.1",
+    "astro": "^4.16.16",
     "unbuild": "^2.0.0"
   }
 }
src/index.ts | 123
@@ -6,9 +6,25 @@ import path from 'node:path';
 import { Operator } from 'opendal';
 import { rimrafSync } from 'rimraf';
 
+type Path = {
+  // The directory in the astro static build that you want to upload to S3.
+  path: string;
+  // Whether to upload the files that locates in the inner directory.
+  recursive?: boolean;
+  // Whether to keep the original files after uploading.
+  keep?: boolean;
+  // Whether to override the existing files on S3.
+  // It will be override only when the content-length don't match the file size by default.
+  override?: boolean;
+};
+
 type Options = {
-  // The directories that you want to upload to S3.
-  paths: string[];
+  // Enable the uploader
+  enable?: boolean;
+  // The directory in the astro static build that you want to upload to S3.
+  paths: Array<string | Path>;
   // Whether to upload the files that locates in the inner directory.
   recursive?: boolean;
   // Whether to keep the original files after uploading.
   keep?: boolean;
   // Whether to override the existing files on S3.
@@ -20,7 +36,7 @@ type Options = {
   endpoint?: string;
   // The name of the bucket.
   bucket: string;
-  // The root directory you want to upload files.
+  // The root directory in S3 service that you want to upload files.
   root?: string;
   // The access key id.
   accessKey: string;
@@ -32,8 +48,22 @@ type Options = {
 
 const S3Options = z
   .object({
-    paths: z.array(z.string()).min(1),
+    enable: z.boolean().optional().default(true),
+    paths: z
+      .array(
+        z.union([
+          z.string(),
+          z.object({
+            path: z.string(),
+            keep: z.boolean().optional(),
+            recursive: z.boolean().optional(),
+            override: z.boolean(),
+          }),
+        ]),
+      )
+      .min(1),
     keep: z.boolean().default(false),
     recursive: z.boolean().default(true),
     override: z.boolean().default(false),
     region: z.string().min(1).default('auto'),
     endpoint: z.string().url().optional(),
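To make the union's behavior concrete, here is a standalone sketch of just the `paths` field validation, trimmed from the schema above for illustration only:

```ts
import { z } from 'zod';

// Trimmed copy of the paths schema above.
const Paths = z
  .array(
    z.union([
      z.string(),
      z.object({
        path: z.string(),
        keep: z.boolean().optional(),
        recursive: z.boolean().optional(),
        override: z.boolean(),
      }),
    ]),
  )
  .min(1);

Paths.parse(['assets', { path: 'images', override: false }]); // ok: strings and objects may be mixed
Paths.parse([]); // throws: at least one path is required
Paths.parse([{ path: 'images' }]); // throws: `override` is not optional in this schema
```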
@@ -56,8 +86,20 @@ const S3Options = z
 
 const parseOptions = (opts: Options, logger: AstroIntegrationLogger) => {
   try {
-    const { paths, keep, override, region, endpoint, bucket, root, accessKey, secretAccessKey, extraOptions } =
-      S3Options.parse(opts);
+    const {
+      enable,
+      paths,
+      recursive,
+      keep,
+      override,
+      region,
+      endpoint,
+      bucket,
+      root,
+      accessKey,
+      secretAccessKey,
+      extraOptions,
+    } = S3Options.parse(opts);
 
     // Create opendal operator options.
     // The common configurations are listed here https://docs.rs/opendal/latest/opendal/services/struct.S3.html#configuration
@@ -73,7 +115,18 @@ const parseOptions = (opts: Options, logger: AstroIntegrationLogger) => {
       options.endpoint = endpoint;
     }
 
-    return { options, paths, keep, override };
+    const resolvedPaths = paths.map((path) =>
+      typeof path === 'string'
+        ? { path, recursive, keep, override }
+        : {
+            path: path.path,
+            recursive: path.recursive === undefined ? recursive : path.recursive,
+            keep: path.keep === undefined ? keep : path.keep,
+            override: path.override === undefined ? override : path.override,
+          },
+    );
+
+    return { options, paths: resolvedPaths, enable };
   } catch (err) {
     if (err instanceof z.ZodError) {
       logger.error(`Uploader options validation error, there are ${err.issues.length} errors:`);
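The mapping above makes the precedence explicit: a per-path setting wins when present, otherwise the top-level default applies. An equivalent standalone sketch of the same rule (not the exported API; `resolvePath` is an illustrative name):

```ts
type PathEntry = string | { path: string; recursive?: boolean; keep?: boolean; override?: boolean };
type Defaults = { recursive: boolean; keep: boolean; override: boolean };

// Same resolution rule as parseOptions above, restated in isolation.
const resolvePath = (entry: PathEntry, defaults: Defaults) =>
  typeof entry === 'string'
    ? { path: entry, ...defaults }
    : {
        path: entry.path,
        recursive: entry.recursive ?? defaults.recursive,
        keep: entry.keep ?? defaults.keep,
        override: entry.override ?? defaults.override,
      };

// resolvePath('assets', { recursive: true, keep: false, override: false })
//   -> { path: 'assets', recursive: true, keep: false, override: false }
// resolvePath({ path: 'images', keep: true }, { recursive: true, keep: false, override: false })
//   -> { path: 'images', recursive: true, keep: true, override: false }
```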
@@ -88,17 +141,15 @@ const parseOptions = (opts: Options, logger: AstroIntegrationLogger) => {
 
 class Uploader {
   private operator: Operator;
-  private override: boolean;
 
-  constructor(operator: Operator, override: boolean) {
+  constructor(operator: Operator) {
     this.operator = operator;
-    this.override = override;
   }
 
-  async isExist(key: string, size: number): Promise<boolean> {
+  async isExist(key: string, size: number, override: boolean): Promise<boolean> {
     try {
       const { contentLength } = await this.operator.stat(key);
-      if ((contentLength !== null && contentLength !== BigInt(size)) || this.override) {
+      if ((contentLength !== null && contentLength !== BigInt(size)) || override) {
         await this.operator.delete(key);
         return false;
       }
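With `override` now a per-call argument, one `Uploader` instance can serve paths with different overwrite policies. A sketch of the decision flow, assuming (as the code above implies) that `stat()` rejects when the key does not exist; `shouldSkip` is an illustrative name:

```ts
import { Operator } from 'opendal';

// Restatement of the isExist logic above, in isolation.
const shouldSkip = async (op: Operator, key: string, size: number, override: boolean): Promise<boolean> => {
  try {
    const { contentLength } = await op.stat(key);
    if ((contentLength !== null && contentLength !== BigInt(size)) || override) {
      await op.delete(key); // stale or force-overridden: drop the remote copy and re-upload
      return false;
    }
    return true; // sizes match and no override requested: skip the upload
  } catch {
    return false; // key does not exist yet: upload it
  }
};
```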
@@ -125,19 +176,25 @@ export const uploader = (opts: Options): AstroIntegration => ({
   name: 'S3 Uploader',
   hooks: {
     'astro:build:done': async ({ dir, logger }: { dir: URL; logger: AstroIntegrationLogger }) => {
-      const { options, paths, keep, override } = parseOptions(opts, logger);
-      const operator = new Operator('s3', options);
+      const { options, paths, enable } = parseOptions(opts, logger);
+      if (!enable) {
+        logger.warn('Skip the astro uploader.');
+        return;
+      }
 
       logger.info('Try to verify the S3 credentials.');
+      const operator = new Operator('s3', options);
       await operator.check();
 
       logger.info(`Start to upload static files in dir ${paths} to S3 compatible backend.`);
 
-      const uploader = new Uploader(operator, override);
+      const uploader = new Uploader(operator);
       for (const current of paths) {
         await uploadFile(uploader, logger, current, dir.pathname);
-        if (!keep) {
-          rimrafSync(path.join(dir.pathname, current));
+        if (!current.keep && current.recursive) {
+          const resolvedPath = path.join(dir.pathname, current.path);
+          logger.info(`Remove the path: ${resolvedPath}`);
+          // Delete the whole path
+          rimrafSync(resolvedPath);
         }
       }
 
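Since the schema defaults `enable` to true, existing configs keep uploading; the early return means a disabled uploader never even checks credentials. A hedged usage sketch of the new switch (the `UPLOAD_TO_S3` variable name and bucket are hypothetical):

```ts
import { uploader } from 'astro-uploader';

// Skip uploads outside CI with the new switch.
const integration = uploader({
  enable: process.env.UPLOAD_TO_S3 === 'true',
  paths: ['assets'],
  bucket: 'my-bucket',
  accessKey: process.env.S3_ACCESS_KEY ?? '',
  secretAccessKey: process.env.S3_SECRET_KEY ?? '',
});
// When enable is false, the hook logs "Skip the astro uploader." and returns
// before the credential check, so builds without S3 credentials still pass.
```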
@@ -151,8 +208,18 @@ const normalizePath = (current: string): string => {
   return current.includes(path.win32.sep) ? current.split(path.win32.sep).join(path.posix.sep) : current;
 };
 
-const uploadFile = async (uploader: Uploader, logger: AstroIntegrationLogger, current: string, root: string) => {
-  const filePath = path.join(root, current);
+const uploadFile = async (
+  uploader: Uploader,
+  logger: AstroIntegrationLogger,
+  current: {
+    path: string;
+    recursive: boolean;
+    keep: boolean;
+    override: boolean;
+  },
+  root: string,
+) => {
+  const filePath = current.path;
   const fileStats = fs.statSync(filePath);
   const isFile = !fileStats.isDirectory();
   const uploadAction = async (key: string) => {
@@ -162,19 +229,27 @@ const uploadFile = async (uploader: Uploader, logger: AstroIntegrationLogger, current: string, root: string) => {
   };
 
   if (isFile) {
-    const key = normalizePath(current);
-    if (await uploader.isExist(key, fileStats.size)) {
+    const key = normalizePath(path.join(root, current.path));
+    if (await uploader.isExist(key, fileStats.size, current.override)) {
       logger.info(`${key} exists on backend, skip.`);
     } else {
       await uploadAction(key);
     }
+
+    if (!current.keep && !current.recursive) {
+      rimrafSync(current.path);
+    }
   } else {
-    // Reclusive upload files.
+    // Reclusive upload files or only upload the first hierarchy of the files.
     for (const next of fs.readdirSync(filePath)) {
       if (next.startsWith('.')) {
         continue;
       }
-      await uploadFile(uploader, logger, path.join(current, next), root);
+
+      const nextFilePath = path.join(current.path, next);
+      if (current.recursive || !fs.statSync(nextFilePath).isDirectory()) {
+        await uploadFile(uploader, logger, { ...current, path: nextFilePath }, root);
+      }
     }
   }
 };
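The recursion guard above means a non-recursive path uploads only its first level of files and ignores subdirectories, while hidden entries are always skipped. A self-contained sketch of that traversal rule, listing what would be uploaded (illustrative only, not part of the uploader):

```ts
import fs from 'node:fs';
import path from 'node:path';

// Walk mirroring the loop above: collect the files a given setting would upload.
const listUploadCandidates = (dir: string, recursive: boolean): string[] => {
  const results: string[] = [];
  for (const next of fs.readdirSync(dir)) {
    if (next.startsWith('.')) continue; // hidden entries are always skipped
    const nextPath = path.join(dir, next);
    if (fs.statSync(nextPath).isDirectory()) {
      if (recursive) results.push(...listUploadCandidates(nextPath, recursive));
    } else {
      results.push(nextPath);
    }
  }
  return results;
};

// listUploadCandidates('dist/assets', false) returns only the files directly in
// dist/assets, matching the non-recursive upload behavior.
```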