# Terminal — commands used throughout this walkthrough
kamal setup
kamal accessory reboot loadbalancer
kamal deploy
rails action_text:install
rails g scaffold posts title content:rich_text
rails g job time_broadcast
bundle add aws-sdk-s3
bundle add mission_control-jobs
# credentials (edit with: bin/rails credentials:edit)
# Placeholder values — replace SECRET_KEY_BASE / PASSWORD / DOMAIN with real secrets.
secret_key_base: SECRET_KEY_BASE
postgres:
  password: PASSWORD
minio:
  root_user: minio
  root_password: PASSWORD
  endpoint: https://minio.DOMAIN
# Proxmox — provision one LXC container per role, then harden with fail2ban.
TEMPLATE="local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
SWAP="512"
PASSWORD="Password123"
START="0"
FEATURES="nesting=1"
SSH_KEY="/root/.ssh/id_kobaltz.pub"
BRIDGE="vmbr0"
GATEWAY="192.168.1.1"
IP_PREFIX="192.168.1"
POOL="example"

# Create a resource pool to group all containers for this app.
pvesh create /pools -poolid "$POOL" -comment "Resource Pool for Example"

# Each entry: "CTID HOSTNAME CORES MEMORY_MB ROOTFS_GB"
# The container ID doubles as the last octet of the IP (e.g. 113 -> 192.168.1.113).
containers=(
  "111 cloudflared 2 2048 10"
  "112 loadbalancer 2 2048 10"
  "113 app1 2 4096 16"
  "114 app2 2 4096 16"
  "115 worker1 2 4096 16"
  "116 worker2 2 4096 16"
  "117 database 2 8192 256"
  "118 minio 2 8192 256"
)

for container in "${containers[@]}"; do
  read -r ID HOSTNAME CORES MEMORY ROOTFS <<< "$container"
  IP="$IP_PREFIX.$ID"
  pct create "$ID" "$TEMPLATE" \
    --swap "$SWAP" \
    --password "$PASSWORD" \
    --start "$START" \
    --hostname "$HOSTNAME" \
    --features "$FEATURES" \
    --ssh-public-keys "$SSH_KEY" \
    --cores "$CORES" \
    --memory "$MEMORY" \
    --rootfs "local-lvm:$ROOTFS" \
    --net0 "name=eth0,bridge=$BRIDGE,ip=$IP/24,gw=$GATEWAY" \
    --pool "$POOL"
done

# Update packages and install fail2ban inside every container.
for ct in {111..118}; do
  pct exec "$ct" -- bash -c "apt update -y && apt upgrade -y && apt install fail2ban -y"
done
# config/deploy.yml
# Name of your application. Used to uniquely configure containers.
service: example

# Name of the container image.
image: kobaltz/example

# Deploy to these servers.
servers:
  web:
    - 192.168.1.113
    - 192.168.1.114
  job:
    hosts:
      - 192.168.1.115
      - 192.168.1.116
    cmd: bin/jobs

# Enable SSL auto certification via Let's Encrypt and allow for multiple apps on a single web server.
# Remove this section when using multiple web servers and ensure you terminate SSL at your load balancer.
#
# Note: If using Cloudflare, set encryption mode in SSL/TLS setting to "Full" to enable CF-to-app encryption.
proxy:
  ssl: false
  host: www.railsenv.com

# Credentials for your image host.
registry:
  # Specify the registry server, if you're not using Docker Hub
  # server: registry.digitalocean.com / ghcr.io / ...
  username: kobaltz
  # Always use an access token rather than a real password when possible.
  password:
    - KAMAL_REGISTRY_PASSWORD

# Inject ENV variables into containers (secrets come from .kamal/secrets).
env:
  secret:
    - RAILS_MASTER_KEY
  # clear:
  #   # Run the Solid Queue Supervisor inside the web server's Puma process to do jobs.
  #   # When you start using multiple servers, you should split out job processing to a dedicated machine.
  #   SOLID_QUEUE_IN_PUMA: true
  #   # Set number of processes dedicated to Solid Queue (default: 1)
  #   JOB_CONCURRENCY: 3
  #   # Set number of cores available to the application on each server (default: 1).
  #   WEB_CONCURRENCY: 2
  #   # Match this to any external database server to configure Active Record correctly
  #   # Use example-db for a db accessory server on the same machine via local kamal docker network.
  #   DB_HOST: 192.168.1.2
  #   # Log everything from Rails
  #   RAILS_LOG_LEVEL: debug

# Aliases are triggered with "bin/kamal <alias>".
# You can overwrite arguments on invocation:
# "bin/kamal logs -r job" will tail logs from the first server in the job section.
aliases:
  console: app exec --interactive --reuse "bin/rails console"
  shell: app exec --interactive --reuse "bash"
  logs: app logs -f
  dbc: app exec --interactive --reuse "bin/rails dbconsole"

# Use a persistent storage volume for sqlite database files and local Active Storage files.
# Recommended to change this to a mounted volume path that is backed up off server.
volumes:
  - "example_storage:/rails/storage"

# Bridge fingerprinted assets, like JS and CSS, between versions to avoid
# hitting 404 on in-flight requests. Combines all files from new and old
# version inside the asset_path.
asset_path: /rails/public/assets

# Configure the image builder.
builder:
  arch: amd64
  # # Build image via remote server (useful for faster amd64 builds on arm64 computers)
  # remote: ssh://docker@docker-builder-server
  #
  # # Pass arguments and secrets to the Docker build process
  # args:
  #   RUBY_VERSION: ruby-3.3.5
  # secrets:
  #   - GITHUB_TOKEN
  #   - RAILS_MASTER_KEY

# Use a different ssh user than root
# ssh:
#   user: app

# Use accessory services (secrets come from .kamal/secrets).
accessories:
  # nginx in front of the two web hosts; config is pushed from the repo.
  loadbalancer:
    image: nginx:latest
    host: 192.168.1.112
    port: "80:80"
    files:
      - config/nginx.conf:/etc/nginx/conf.d/default.conf
  postgres:
    image: postgres:17
    port: 5432:5432
    host: 192.168.1.117
    env:
      clear:
        POSTGRES_USER: example
        POSTGRES_DB: example_production
      secret:
        - POSTGRES_PASSWORD
    directories:
      - data:/var/lib/postgresql/data
  # S3-compatible object storage for Active Storage (see config/storage.yml).
  minio:
    image: minio/minio
    host: 192.168.1.118
    options:
      publish:
        - "9000:9000"
        - "9001:9001"
    env:
      secret:
        - MINIO_ROOT_USER
        - MINIO_ROOT_PASSWORD
    directories:
      - data:/data
    cmd: server /data --console-address ":9001"
  # db:
  #   image: mysql:8.0
  #   host: 192.168.0.2
  #   # Change to 3306 to expose port to the world instead of just local network.
  #   port: "127.0.0.1:3306:3306"
  #   env:
  #     clear:
  #       MYSQL_ROOT_HOST: '%'
  #     secret:
  #       - MYSQL_ROOT_PASSWORD
  #   files:
  #     - config/mysql/production.cnf:/etc/mysql/my.cnf
  #     - db/production.sql:/docker-entrypoint-initdb.d/setup.sql
  #   directories:
  #     - data:/var/lib/mysql
  # redis:
  #   image: redis:7.0
  #   host: 192.168.0.2
  #   port: 6379
  #   directories:
  #     - data:/data
# config/nginx.conf
# Round-robin load balancer over the two Kamal web hosts, with a dedicated
# /cable location that upgrades the connection for Action Cable websockets.
upstream backend {
  server 192.168.1.113;
  server 192.168.1.114;
}

server {
  listen 80;

  location / {
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_pass http://backend;
  }

  location /cable {
    proxy_pass http://backend/cable;
    # Websockets require HTTP/1.1 and the Upgrade/Connection headers.
    proxy_http_version 1.1;
    proxy_set_header Upgrade websocket;
    proxy_set_header Connection Upgrade;
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
  }
}
# .kamal/secrets
# Grab the registry password from ENV
KAMAL_REGISTRY_PASSWORD=$KAMAL_REGISTRY_PASSWORD

# Improve security by using a password manager. Never check config/master.key into git!
RAILS_MASTER_KEY=$(cat config/master.key)

# Pull the remaining secrets out of the encrypted Rails credentials.
POSTGRES_PASSWORD=$(bin/rails runner "puts Rails.application.credentials.dig(:postgres, :password)")
MINIO_ROOT_USER=$(bin/rails runner "puts Rails.application.credentials.dig(:minio, :root_user)")
MINIO_ROOT_PASSWORD=$(bin/rails runner "puts Rails.application.credentials.dig(:minio, :root_password)")
# config/storage.yml
# Active Storage service backed by the MinIO accessory (S3-compatible).
# force_path_style is required because MinIO serves buckets by path, not subdomain.
minio:
  service: S3
  access_key_id: <%= Rails.application.credentials.minio.root_user %>
  secret_access_key: <%= Rails.application.credentials.minio.root_password %>
  region: us-east-1
  bucket: example
  endpoint: <%= Rails.application.credentials.minio.endpoint %>
  force_path_style: true
# config/environments/manufacturing.rb config.active_storage.service = :minio
# config/routes.rb mount MissionControl::Jobs::Engine, at: "/jobs", as: :jobs
# app/views/layouts/_navigation_links.html.erb
<%# Subscribe to the :time Turbo Stream; TimeBroadcastJob replaces the #time link below. %>
<%= turbo_stream_from :time %>
<li class="nav-item me-4">
  <%= link_to "", "#", id: :time, class: 'nav-link' %>
</li>
# app/jobs/time_broadcast_job.rb
class TimeBroadcastJob < ApplicationJob
  queue_as :default

  # Broadcasts the current wall-clock time over the :time Turbo Stream,
  # replacing the element with DOM id "time" in the navigation bar.
  # Scheduled to run every second via config/recurring.yml.
  def perform
    current_time = Time.current.strftime("%-I:%M:%S")
    Turbo::StreamsChannel.broadcast_replace_to(
      :time,
      target: "time",
      # Single-quote the HTML attributes so the Ruby string stays valid.
      html: "<a href='#' id='time' class='nav-link'>#{current_time}</a>"
    )
  end
end
# config/recurring.yml
# Solid Queue recurring task: broadcast the time once per second.
# The schedule value is parsed by Fugit ("every second" natural-language syntax).
time_broadcast:
  class: TimeBroadcastJob
  queue: background
  schedule: every second