Implements cloud-based file storage for plant photos, documents, and certificates: Storage Layer: - Multi-provider support (AWS S3, Cloudflare R2, MinIO, local filesystem) - S3-compatible provider with presigned URL generation - Local storage provider for development with signed URL verification - Configurable via environment variables Image Processing: - Automatic thumbnail generation (150x150, 300x300, 600x600, 1200x1200) - WebP conversion for optimized file sizes - EXIF data extraction for image metadata - Image optimization with Sharp API Endpoints: - POST /api/upload/image - Upload images with automatic processing - POST /api/upload/document - Upload documents (PDF, DOC, DOCX) - POST /api/upload/presigned - Get presigned URLs for direct uploads - GET/DELETE /api/upload/[fileId] - File management UI Components: - ImageUploader - Drag & drop image upload with preview - PhotoGallery - Grid gallery with lightbox view - DocumentUploader - Document upload with file type icons - ProgressBar - Animated upload progress indicator Database: - FileStore service with in-memory storage (Prisma schema ready for Agent 2) - File metadata tracking with soft delete support - Category-based file organization
269 lines · 6.3 KiB · TypeScript
/**
 * Image Processing Pipeline for LocalGreenChain
 * Agent 3: File Upload & Storage System
 *
 * Handles image optimization, thumbnail generation, and EXIF extraction
 */

import sharp from 'sharp';
import {
  ImageSize,
  ThumbnailConfig,
  THUMBNAIL_CONFIGS,
  ExifData,
  ImageProcessingOptions,
} from './types';

/**
 * Subset of Sharp's image metadata surfaced to callers of
 * ImageProcessor.getMetadata.
 */
export interface ImageMetadata {
  /** Pixel width; 0 when Sharp cannot report it. */
  width: number;
  /** Pixel height; 0 when Sharp cannot report it. */
  height: number;
  /** Format name reported by Sharp (e.g. 'jpeg'); 'unknown' when unreported. */
  format: string;
  /** Color space (e.g. 'srgb'), when Sharp reports one. */
  space?: string;
  /** Number of color channels, when Sharp reports it. */
  channels?: number;
  /** Whether the image carries an alpha channel, when Sharp reports it. */
  hasAlpha?: boolean;
  /** EXIF orientation tag value, when present in the source image. */
  orientation?: number;
}

export class ImageProcessor {
|
|
private defaultQuality = 85;
|
|
|
|
/**
|
|
* Get image metadata
|
|
*/
|
|
async getMetadata(buffer: Buffer): Promise<ImageMetadata> {
|
|
const metadata = await sharp(buffer).metadata();
|
|
|
|
return {
|
|
width: metadata.width || 0,
|
|
height: metadata.height || 0,
|
|
format: metadata.format || 'unknown',
|
|
space: metadata.space,
|
|
channels: metadata.channels,
|
|
hasAlpha: metadata.hasAlpha,
|
|
orientation: metadata.orientation,
|
|
};
|
|
}
|
|
|
|
/**
|
|
* Extract EXIF data from image
|
|
*/
|
|
async extractExif(buffer: Buffer): Promise<ExifData | undefined> {
|
|
try {
|
|
const metadata = await sharp(buffer).metadata();
|
|
|
|
if (!metadata.exif) {
|
|
return undefined;
|
|
}
|
|
|
|
// Parse basic EXIF data
|
|
const exifData: ExifData = {};
|
|
|
|
// Sharp provides some EXIF data directly
|
|
if (metadata.orientation) {
|
|
exifData.orientation = metadata.orientation;
|
|
}
|
|
|
|
// For more detailed EXIF parsing, we would need an EXIF library
|
|
// For now, return basic data
|
|
return Object.keys(exifData).length > 0 ? exifData : undefined;
|
|
} catch (error) {
|
|
console.warn('Error extracting EXIF data:', error);
|
|
return undefined;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Optimize an image for web
|
|
*/
|
|
async optimize(
|
|
buffer: Buffer,
|
|
options: ImageProcessingOptions = {}
|
|
): Promise<Buffer> {
|
|
const {
|
|
maxWidth = 2048,
|
|
maxHeight = 2048,
|
|
quality = this.defaultQuality,
|
|
convertToWebP = true,
|
|
} = options;
|
|
|
|
let pipeline = sharp(buffer)
|
|
.rotate() // Auto-rotate based on EXIF orientation
|
|
.resize(maxWidth, maxHeight, {
|
|
fit: 'inside',
|
|
withoutEnlargement: true,
|
|
});
|
|
|
|
if (convertToWebP) {
|
|
pipeline = pipeline.webp({ quality });
|
|
} else {
|
|
// Optimize in original format
|
|
const metadata = await sharp(buffer).metadata();
|
|
switch (metadata.format) {
|
|
case 'jpeg':
|
|
pipeline = pipeline.jpeg({ quality, mozjpeg: true });
|
|
break;
|
|
case 'png':
|
|
pipeline = pipeline.png({ compressionLevel: 9 });
|
|
break;
|
|
case 'gif':
|
|
pipeline = pipeline.gif();
|
|
break;
|
|
default:
|
|
pipeline = pipeline.webp({ quality });
|
|
}
|
|
}
|
|
|
|
return pipeline.toBuffer();
|
|
}
|
|
|
|
/**
|
|
* Generate all thumbnail sizes
|
|
*/
|
|
async generateThumbnails(
|
|
buffer: Buffer,
|
|
sizes: ImageSize[] = ['thumbnail', 'small', 'medium', 'large']
|
|
): Promise<Record<string, Buffer>> {
|
|
const thumbnails: Record<string, Buffer> = {};
|
|
|
|
await Promise.all(
|
|
sizes.map(async (size) => {
|
|
if (size === 'original') return;
|
|
|
|
const config = THUMBNAIL_CONFIGS[size];
|
|
thumbnails[size] = await this.generateThumbnail(buffer, config);
|
|
})
|
|
);
|
|
|
|
return thumbnails;
|
|
}
|
|
|
|
/**
|
|
* Generate a single thumbnail
|
|
*/
|
|
async generateThumbnail(buffer: Buffer, config: ThumbnailConfig): Promise<Buffer> {
|
|
return sharp(buffer)
|
|
.rotate() // Auto-rotate based on EXIF orientation
|
|
.resize(config.width, config.height, {
|
|
fit: config.fit,
|
|
withoutEnlargement: true,
|
|
})
|
|
.webp({ quality: 80 })
|
|
.toBuffer();
|
|
}
|
|
|
|
/**
|
|
* Convert image to WebP format
|
|
*/
|
|
async toWebP(buffer: Buffer, quality = 85): Promise<Buffer> {
|
|
return sharp(buffer)
|
|
.rotate()
|
|
.webp({ quality })
|
|
.toBuffer();
|
|
}
|
|
|
|
/**
|
|
* Crop image to specific dimensions
|
|
*/
|
|
async crop(
|
|
buffer: Buffer,
|
|
width: number,
|
|
height: number,
|
|
options: { left?: number; top?: number } = {}
|
|
): Promise<Buffer> {
|
|
const { left = 0, top = 0 } = options;
|
|
|
|
return sharp(buffer)
|
|
.extract({ left, top, width, height })
|
|
.toBuffer();
|
|
}
|
|
|
|
/**
|
|
* Smart crop using attention/entropy detection
|
|
*/
|
|
async smartCrop(buffer: Buffer, width: number, height: number): Promise<Buffer> {
|
|
return sharp(buffer)
|
|
.resize(width, height, {
|
|
fit: 'cover',
|
|
position: 'attention', // Focus on the most "interesting" part
|
|
})
|
|
.toBuffer();
|
|
}
|
|
|
|
/**
|
|
* Add a watermark to an image
|
|
*/
|
|
async addWatermark(
|
|
buffer: Buffer,
|
|
watermarkBuffer: Buffer,
|
|
options: {
|
|
position?: 'center' | 'top-left' | 'top-right' | 'bottom-left' | 'bottom-right';
|
|
opacity?: number;
|
|
} = {}
|
|
): Promise<Buffer> {
|
|
const { position = 'bottom-right', opacity = 0.5 } = options;
|
|
|
|
const image = sharp(buffer);
|
|
const metadata = await image.metadata();
|
|
const watermark = await sharp(watermarkBuffer)
|
|
.resize(Math.round((metadata.width || 500) * 0.2), null, {
|
|
withoutEnlargement: true,
|
|
})
|
|
.ensureAlpha()
|
|
.toBuffer();
|
|
|
|
const watermarkMeta = await sharp(watermark).metadata();
|
|
|
|
let gravity: sharp.Gravity;
|
|
switch (position) {
|
|
case 'top-left':
|
|
gravity = 'northwest';
|
|
break;
|
|
case 'top-right':
|
|
gravity = 'northeast';
|
|
break;
|
|
case 'bottom-left':
|
|
gravity = 'southwest';
|
|
break;
|
|
case 'bottom-right':
|
|
gravity = 'southeast';
|
|
break;
|
|
case 'center':
|
|
default:
|
|
gravity = 'center';
|
|
}
|
|
|
|
return image
|
|
.composite([
|
|
{
|
|
input: watermark,
|
|
gravity,
|
|
blend: 'over',
|
|
},
|
|
])
|
|
.toBuffer();
|
|
}
|
|
|
|
/**
|
|
* Generate a blur placeholder for progressive loading
|
|
*/
|
|
async generateBlurPlaceholder(buffer: Buffer, size = 10): Promise<string> {
|
|
const blurredBuffer = await sharp(buffer)
|
|
.resize(size, size, { fit: 'inside' })
|
|
.webp({ quality: 20 })
|
|
.toBuffer();
|
|
|
|
return `data:image/webp;base64,${blurredBuffer.toString('base64')}`;
|
|
}
|
|
|
|
/**
|
|
* Validate that buffer is a valid image
|
|
*/
|
|
async isValidImage(buffer: Buffer): Promise<boolean> {
|
|
try {
|
|
const metadata = await sharp(buffer).metadata();
|
|
return !!(metadata.width && metadata.height);
|
|
} catch {
|
|
return false;
|
|
}
|
|
}
|
|
}
|