MinIO Storage Service


MinIO is the S3-compatible object storage service in nself, providing scalable file storage for your applications. This guide covers configuration, usage, and integration with your backend services.

What is MinIO?

MinIO is a high-performance, distributed object storage server designed for private cloud infrastructure. In nself, MinIO provides:

  • S3 Compatibility: Full compatibility with Amazon S3 APIs
  • File Storage: Store images, documents, videos, and any binary data
  • Scalable Architecture: Horizontal scaling for high availability
  • Web Console: Built-in browser-based management interface
  • Security: Advanced access controls and encryption

S3 Compatible

MinIO is fully compatible with AWS S3 APIs, so you can use any S3 SDK or tool with your MinIO storage.
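
For example, the stock AWS SDK works against MinIO once it is pointed at the local endpoint. A minimal sketch using the default development credentials (adjust the endpoint and keys to your setup):

import { S3 } from 'aws-sdk';

// Point the standard AWS SDK at MinIO instead of AWS
const s3 = new S3({
  endpoint: 'http://localhost:9000',
  accessKeyId: 'minioadmin',
  secretAccessKey: 'minioadmin',
  s3ForcePathStyle: true, // MinIO serves buckets on paths, not subdomains
  signatureVersion: 'v4'
});

// Any S3 call works unchanged, e.g. listing buckets
s3.listBuckets()
  .promise()
  .then(({ Buckets }) => console.log(Buckets));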

Accessing MinIO

Once your nself environment is running, you can access MinIO services:

# Start your nself environment
nself up

# Access MinIO Console:
# Development: http://localhost:9001
# With SSL: https://storage.local.nself.org

# API Endpoint:
# Development: http://localhost:9000
# With SSL: https://storage-api.local.nself.org

Default Credentials

# Default MinIO credentials (change in production!)
Username: minioadmin
Password: minioadmin

# Configure in .env.local
MINIO_ROOT_USER=your-username
MINIO_ROOT_PASSWORD=your-secure-password

Configuration

Basic Configuration

# MinIO settings in .env.local
MINIO_ENABLED=true
MINIO_ROOT_USER=minioadmin
MINIO_ROOT_PASSWORD=minioadmin
MINIO_DEFAULT_BUCKETS=uploads,avatars,documents

# Storage paths
MINIO_DATA_DIR=./storage/minio/data
MINIO_CONFIG_DIR=./storage/minio/config

# Network settings
MINIO_API_PORT=9000
MINIO_CONSOLE_PORT=9001
MINIO_ADDRESS=localhost:9000
MINIO_CONSOLE_ADDRESS=localhost:9001

Production Configuration

# Production security settings
MINIO_ROOT_USER=admin
MINIO_ROOT_PASSWORD=very-secure-password-here

# SSL/TLS configuration
MINIO_SSL_ENABLED=true
MINIO_SSL_CERT_PATH=/etc/ssl/certs/minio.crt
MINIO_SSL_KEY_PATH=/etc/ssl/private/minio.key

# Domain configuration
MINIO_SERVER_URL=https://storage.myapp.com
MINIO_BROWSER_REDIRECT_URL=https://console.storage.myapp.com

# Performance settings
MINIO_CACHE_DRIVES="/tmp/cache1;/tmp/cache2"
MINIO_CACHE_EXCLUDE="*.jpg;*.jpeg;*.png"
MINIO_CACHE_QUOTA=80
MINIO_CACHE_AFTER=3
MINIO_CACHE_WATERMARK_LOW=70
MINIO_CACHE_WATERMARK_HIGH=90

Bucket Management

Automatic Bucket Creation

# Configure default buckets in .env.local
MINIO_DEFAULT_BUCKETS=uploads,avatars,documents,backups

# Buckets are created automatically on startup
# with default permissions
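
If your application needs a bucket beyond the configured defaults, it can ensure one exists at startup. A small sketch using the aws-sdk client from the SDK section below (the bucket name is illustrative):

import { S3 } from 'aws-sdk';

// Create the bucket only if it does not already exist
async function ensureBucket(s3: S3, bucket: string): Promise<void> {
  try {
    await s3.headBucket({ Bucket: bucket }).promise();
  } catch {
    await s3.createBucket({ Bucket: bucket }).promise();
  }
}

// Usage: ensureBucket(s3, 'reports').catch(console.error);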

Manual Bucket Management

# Using MinIO CLI (mc)
# Install MinIO client
nself minio install-cli

# Configure connection
nself mc alias set local http://localhost:9000 minioadmin minioadmin

# Create bucket
nself mc mb local/my-new-bucket

# List buckets
nself mc ls local

# Set bucket policy
nself mc policy set public local/uploads

# Copy files
nself mc cp /path/to/file.txt local/uploads/

SDK Integration

JavaScript/TypeScript (NestJS)

// Install AWS SDK (v2 bundles its own TypeScript types)
npm install aws-sdk

// Configure S3 client for MinIO
import { Injectable } from '@nestjs/common';
import { S3 } from 'aws-sdk';

@Injectable()
export class StorageService {
  private s3: S3;

  constructor() {
    this.s3 = new S3({
      endpoint: process.env.MINIO_ENDPOINT || 'http://localhost:9000',
      accessKeyId: process.env.MINIO_ACCESS_KEY || 'minioadmin',
      secretAccessKey: process.env.MINIO_SECRET_KEY || 'minioadmin',
      s3ForcePathStyle: true, // Important for MinIO
      signatureVersion: 'v4'
    });
  }

  async uploadFile(bucket: string, key: string, file: Buffer, contentType?: string) {
    const params = {
      Bucket: bucket,
      Key: key,
      Body: file,
      ContentType: contentType
    };

    return this.s3.upload(params).promise();
  }

  async downloadFile(bucket: string, key: string) {
    const params = {
      Bucket: bucket,
      Key: key
    };

    return this.s3.getObject(params).promise();
  }

  async deleteFile(bucket: string, key: string) {
    const params = {
      Bucket: bucket,
      Key: key
    };

    return this.s3.deleteObject(params).promise();
  }

  async getSignedUrl(bucket: string, key: string, expires: number = 3600) {
    const params = {
      Bucket: bucket,
      Key: key,
      Expires: expires
    };

    return this.s3.getSignedUrl('getObject', params);
  }
}

Python (FastAPI)

# Install boto3
pip install boto3

from boto3 import client
import os

class StorageService:
    def __init__(self):
        self.s3 = client(
            's3',
            endpoint_url=os.getenv('MINIO_ENDPOINT', 'http://localhost:9000'),
            aws_access_key_id=os.getenv('MINIO_ACCESS_KEY', 'minioadmin'),
            aws_secret_access_key=os.getenv('MINIO_SECRET_KEY', 'minioadmin'),
            region_name='us-east-1'
        )
    
    def upload_file(self, bucket: str, key: str, file_data: bytes, content_type: str | None = None):
        extra_args = {}
        if content_type:
            extra_args['ContentType'] = content_type
            
        return self.s3.put_object(
            Bucket=bucket,
            Key=key,
            Body=file_data,
            **extra_args
        )
    
    def download_file(self, bucket: str, key: str):
        response = self.s3.get_object(Bucket=bucket, Key=key)
        return response['Body'].read()
    
    def delete_file(self, bucket: str, key: str):
        return self.s3.delete_object(Bucket=bucket, Key=key)
    
    def generate_presigned_url(self, bucket: str, key: str, expiration: int = 3600):
        return self.s3.generate_presigned_url(
            'get_object',
            Params={'Bucket': bucket, 'Key': key},
            ExpiresIn=expiration
        )

Go

package storage

import (
    "context"
    "io"
    "time"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
)

type StorageService struct {
    client *minio.Client
}

func NewStorageService(endpoint, accessKey, secretKey string, useSSL bool) (*StorageService, error) {
    client, err := minio.New(endpoint, &minio.Options{
        Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
        Secure: useSSL,
    })
    if err != nil {
        return nil, err
    }

    return &StorageService{client: client}, nil
}

func (s *StorageService) UploadFile(ctx context.Context, bucket, key string, reader io.Reader, size int64, contentType string) error {
    _, err := s.client.PutObject(ctx, bucket, key, reader, size, minio.PutObjectOptions{
        ContentType: contentType,
    })
    return err
}

func (s *StorageService) DownloadFile(ctx context.Context, bucket, key string) (*minio.Object, error) {
    return s.client.GetObject(ctx, bucket, key, minio.GetObjectOptions{})
}

func (s *StorageService) DeleteFile(ctx context.Context, bucket, key string) error {
    return s.client.RemoveObject(ctx, bucket, key, minio.RemoveObjectOptions{})
}

func (s *StorageService) GeneratePresignedURL(ctx context.Context, bucket, key string, expiry time.Duration) (string, error) {
    // PresignedGetObject returns a *url.URL, so convert it for string callers
    u, err := s.client.PresignedGetObject(ctx, bucket, key, expiry, nil)
    if err != nil {
        return "", err
    }
    return u.String(), nil
}

File Upload Patterns

Direct Upload from Frontend

// Generate presigned URL on backend (this.s3 is the MinIO-pointed
// client from the StorageService example above)
@Post('upload-url')
async getUploadUrl(
  @Body() body: { bucket: string; key: string; contentType: string }
) {
  // getSignedUrl is synchronous in aws-sdk v2, so no await is needed
  const signedUrl = this.s3.getSignedUrl('putObject', {
    Bucket: body.bucket,
    Key: body.key,
    ContentType: body.contentType,
    Expires: 300 // 5 minutes
  });

  return { uploadUrl: signedUrl };
}

// Frontend uploads directly to MinIO
const uploadFile = async (file: File) => {
  // Get signed URL from backend
  const response = await fetch('/api/upload-url', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      bucket: 'uploads',
      key: file.name,
      contentType: file.type
    })
  });

  const { uploadUrl } = await response.json();

  // Upload directly to MinIO
  await fetch(uploadUrl, {
    method: 'PUT',
    body: file,
    headers: {
      'Content-Type': file.type
    }
  });
};

Proxy Upload through Backend

// Upload through backend API
@Post('upload')
@UseInterceptors(FileInterceptor('file'))
async uploadFile(
  @UploadedFile() file: Express.Multer.File,
  @Body() body: { bucket?: string }
) {
  const bucket = body.bucket || 'uploads';
  const key = `${Date.now()}-${file.originalname}`;

  await this.storageService.uploadFile(
    bucket,
    key,
    file.buffer,
    file.mimetype
  );

  return {
    bucket,
    key,
    url: `/api/files/${bucket}/${key}`
  };
}

// Serve files through backend
@Get('files/:bucket/:key')
async serveFile(
  @Param('bucket') bucket: string,
  @Param('key') key: string,
  @Res() res: Response
) {
  try {
    const object = await this.storageService.downloadFile(bucket, key);
    res.setHeader('Content-Type', object.ContentType || 'application/octet-stream');
    res.send(object.Body);
  } catch (error) {
    res.status(404).send('File not found');
  }
}

Bucket Policies & Security

Public Read Bucket

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "AWS": ["*"]
      },
      "Action": ["s3:GetObject"],
      "Resource": ["arn:aws:s3:::uploads/*"]
    }
  ]
}

User-Specific Access

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "AWS": ["arn:aws:iam::123456789:user/alice"]
      },
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:DeleteObject"
      ],
      "Resource": ["arn:aws:s3:::private-files/alice/*"]
    }
  ]
}

Setting Policies via CLI

# Set bucket policy
nself mc policy set public local/uploads
nself mc policy set private local/documents
nself mc policy set download local/downloads

# Custom policy from a JSON file
nself mc policy set-json /path/to/custom-policy.json local/my-bucket
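
Policies can also be applied from application code through the standard S3 API. A hedged sketch using aws-sdk's putBucketPolicy with the public-read policy shown above:

// Apply the public-read policy from the example above via the S3 API
import { S3 } from 'aws-sdk';

const s3 = new S3({
  endpoint: process.env.MINIO_ENDPOINT || 'http://localhost:9000',
  accessKeyId: process.env.MINIO_ACCESS_KEY || 'minioadmin',
  secretAccessKey: process.env.MINIO_SECRET_KEY || 'minioadmin',
  s3ForcePathStyle: true,
  signatureVersion: 'v4'
});

const publicReadPolicy = {
  Version: '2012-10-17',
  Statement: [{
    Effect: 'Allow',
    Principal: { AWS: ['*'] },
    Action: ['s3:GetObject'],
    Resource: ['arn:aws:s3:::uploads/*']
  }]
};

s3.putBucketPolicy({
  Bucket: 'uploads',
  Policy: JSON.stringify(publicReadPolicy)
}).promise().catch(console.error);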

Advanced Features

Server-Side Encryption

# Enable automatic server-side encryption (requires a configured KMS)
MINIO_KMS_AUTO_ENCRYPTION=on

# Set encryption via CLI
nself mc encrypt set sse-s3 local/encrypted-bucket

# Upload with a client-provided key (prefix=key, key must be 32 bytes)
nself mc cp --encrypt-key "local/uploads/=32byteslongsecretkeymustprovided" file.txt local/uploads/

Event Notifications

# Configure a webhook target in the MinIO environment
MINIO_NOTIFY_WEBHOOK_ENABLE_PRIMARY=on
MINIO_NOTIFY_WEBHOOK_ENDPOINT_PRIMARY=http://webhook-service:3000/storage-event

# Subscribe bucket events to that target
nself mc event add local/uploads arn:minio:sqs::PRIMARY:webhook --event put,delete

# List configured events
nself mc event list local/uploads
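
On the receiving end, MinIO POSTs an S3-style JSON event to the configured endpoint. A minimal Express receiver sketch (the route matches the endpoint above; the Records shape follows the standard S3 event format):

import express from 'express';

const app = express();
app.use(express.json());

// MinIO POSTs S3-style event records to the configured webhook endpoint
app.post('/storage-event', (req, res) => {
  for (const record of req.body.Records ?? []) {
    const bucket = record.s3?.bucket?.name;
    const key = decodeURIComponent(record.s3?.object?.key ?? '');
    console.log(`${record.eventName}: ${bucket}/${key}`);
  }
  res.sendStatus(200); // acknowledge so MinIO does not retry
});

app.listen(3000);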

Lifecycle Management

# lifecycle.json (mc ilm import expects JSON)
{
    "Rules": [
        {
            "ID": "TempFilesRule",
            "Status": "Enabled",
            "Filter": {
                "Prefix": "temp/"
            },
            "Expiration": {
                "Days": 7
            }
        },
        {
            "ID": "OldLogsRule",
            "Status": "Enabled",
            "Filter": {
                "Prefix": "logs/"
            },
            "Expiration": {
                "Days": 30
            }
        }
    ]
}

# Apply lifecycle policy
nself mc ilm import local/my-bucket < lifecycle.json

Backup and Replication

Cross-Region Replication

# Set up replication to remote MinIO
nself mc alias set remote https://remote-minio.example.com access-key secret-key

# Enable versioning (required for replication)
nself mc version enable local/my-bucket
nself mc version enable remote/my-bucket

# Configure replication
nself mc replicate add local/my-bucket --remote-bucket remote/my-bucket --priority 1

Backup to External Storage

# Mirror to AWS S3
nself mc alias set aws-s3 https://s3.amazonaws.com aws-access-key aws-secret-key

# Sync buckets
nself mc mirror local/backups aws-s3/my-app-backups

# Automated backup script
#!/bin/bash
DATE=$(date +%Y-%m-%d)
nself mc cp --recursive local/important-data aws-s3/backups/$DATE/

Monitoring and Metrics

Health Checks

# Check MinIO health
curl http://localhost:9000/minio/health/live
curl http://localhost:9000/minio/health/ready

# Get server info
nself mc admin info local

# Check bucket usage
nself mc du local/uploads
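
The same endpoints can back an application-level probe. A small sketch using Node 18+'s global fetch (the endpoint is assumed from the defaults above):

// Liveness probe against the documented health endpoint
async function minioIsLive(endpoint = 'http://localhost:9000'): Promise<boolean> {
  try {
    const res = await fetch(`${endpoint}/minio/health/live`);
    return res.ok; // 200 means the server process is up
  } catch {
    return false;
  }
}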

Prometheus Metrics

# Enable Prometheus metrics
MINIO_PROMETHEUS_AUTH_TYPE=public

# Metrics endpoint (v2 cluster metrics)
curl http://localhost:9000/minio/v2/metrics/cluster

Performance Optimization

Multi-part Upload

// Automatic multi-part upload for large files (aws-sdk v2)
import AWS from 'aws-sdk';

const uploadLargeFile = async (file: File, s3Client: AWS.S3) => {
  const upload = new AWS.S3.ManagedUpload({
    params: {
      Bucket: 'large-files',
      Key: file.name,
      Body: file
    },
    partSize: 10 * 1024 * 1024, // 10 MB parts
    queueSize: 4, // upload up to four parts concurrently
    service: s3Client
  });

  return upload.promise();
};

Connection Pooling

// Configure connection pooling (use http.Agent for a plain-HTTP endpoint)
import AWS from 'aws-sdk';
import * as http from 'http';

const s3 = new AWS.S3({
  endpoint: 'http://localhost:9000',
  httpOptions: {
    agent: new http.Agent({
      maxSockets: 25,
      keepAlive: true,
      keepAliveMsecs: 1000
    })
  }
});

Integration with Hasura

File Metadata in Database

-- Store file metadata
CREATE TABLE file_uploads (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  bucket VARCHAR(255) NOT NULL,
  key VARCHAR(255) NOT NULL,
  original_name VARCHAR(255) NOT NULL,
  content_type VARCHAR(255),
  size_bytes BIGINT,
  user_id UUID REFERENCES users(id),
  created_at TIMESTAMP DEFAULT NOW(),
  UNIQUE(bucket, key)
);
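
A typical flow uploads the object first and then records its metadata through Hasura's GraphQL API. A hedged sketch (the Hasura endpoint and the storageService instance from the SDK section are assumptions; adjust names to your schema):

// storageService is the StorageService from the SDK section above
async function uploadWithMetadata(
  storageService: { uploadFile(bucket: string, key: string, file: Buffer, contentType?: string): Promise<unknown> },
  file: Buffer,
  originalName: string,
  contentType: string,
  userId: string
): Promise<string> {
  const key = `${Date.now()}-${originalName}`;
  await storageService.uploadFile('uploads', key, file, contentType);

  // Record the upload in Postgres via Hasura (endpoint is an assumption)
  await fetch('http://localhost:8080/v1/graphql', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      query: `mutation ($object: file_uploads_insert_input!) {
        insert_file_uploads_one(object: $object) { id }
      }`,
      variables: {
        object: {
          bucket: 'uploads',
          key,
          original_name: originalName,
          content_type: contentType,
          size_bytes: file.length,
          user_id: userId
        }
      }
    })
  });

  return key;
}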

GraphQL File Operations

# Upload file and save metadata
mutation UploadFile {
  insert_file_uploads_one(object: {
    bucket: "uploads"
    key: "2024/user-avatar.jpg"
    original_name: "avatar.jpg"
    content_type: "image/jpeg"
    size_bytes: 1024576
    user_id: "user-id-here"
  }) {
    id
    bucket
    key
    original_name
  }
}

# Query user files
query UserFiles($user_id: uuid!) {
  file_uploads(where: {user_id: {_eq: $user_id}}) {
    id
    bucket
    key
    original_name
    content_type
    size_bytes
    created_at
  }
}

Troubleshooting

Common Issues

# Check MinIO logs
nself logs minio

# Test connectivity
nself mc admin info local

# Check disk usage
nself mc du --recursive local

# Verify bucket permissions
nself mc ls local/my-bucket
nself mc policy get local/my-bucket

Performance Issues

# Check server performance
nself mc admin speedtest local

# Monitor active connections
nself mc admin trace local

# Check server logs for errors
nself mc admin logs local

Best Practices

Security

  • Change Default Credentials: Always use strong, unique passwords
  • Use HTTPS: Enable SSL/TLS for production deployments
  • Least Privilege: Grant minimal required permissions
  • Regular Updates: Keep MinIO updated to the latest version

Performance

  • Dedicated Storage: Use fast SSDs for storage directories
  • Network Optimization: Use high-bandwidth network connections
  • Connection Pooling: Configure appropriate connection pools
  • Monitoring: Monitor performance metrics and set up alerts

Next Steps

Now that you understand MinIO storage in nself, put it to work in your services using the patterns above.

MinIO provides enterprise-grade object storage that scales with your application. Use it to store any type of file while maintaining high performance and reliability.