kubectl apply -f path/file.yaml

kubectl apply -f path

kubectl -n example get pods

kubectl -n example get svc

sed "s/$old/$new/g" deploy.yaml | kubectl apply -f -

kustomize/kustomization.yaml

resources:
  - application/namespace.yaml
  - application/deployment.yaml
  - application/service.yaml
  - application/configmap.yaml

bases:
  - ../../application
patches:
  - replica_count.yaml
configMapGenerator:
  - name: example.config
    namespace: example
    behavior: replace
    files:
      - configs/config.json


kubectl apply -k .\kubernetes\kustomize

kubectl kustomize .\kubernetes\kustomize | kubectl apply -f -

config.json

kubectl -n example get cm

kubectl -n example get cm example example-deploy config -o yaml

env.yaml

kubectl -n example get deploy -o yaml

https://kubectl.docs.kubernetes.io/

Upload a file: gcloud compute scp /home/paede/Downloads/drecs_2020-12-02.sql emarketing:~

gcloud compute instances create gcelab2 --machine-type n1-standard-2 --zone us-central1-c

gcloud compute ssh gcelab2 --zone us-central1-c

gcloud compute project-info describe --project <your_project_ID>

gcloud compute instances create –help

gcloud components list

gcloud beta interactive

gcloud container clusters create [CLUSTER-NAME]

gcloud container clusters get-credentials [CLUSTER-NAME]

kubectl create deployment hello-server --image=gcr.io/google-samples/hello-app:1.0

kubectl expose deployment hello-server --type=LoadBalancer --port 8080

kubectl get service

gcloud container clusters delete [CLUSTER-NAME]

cat << EOF > startup.sh
#! /bin/bash
apt-get update
apt-get install -y nginx
service nginx start
sed -i -- 's/nginx/Google Cloud Platform - '"\$HOSTNAME"'/' /var/www/html/index.nginx-debian.html
EOF

gcloud compute instance-templates create nginx-template \
  --metadata-from-file startup-script=startup.sh

gcloud compute target-pools create nginx-pool

gcloud compute instance-groups managed create nginx-group \
  --base-instance-name nginx \
  --size 2 \
  --template nginx-template \
  --target-pool nginx-pool

gcloud compute instances list

gcloud compute firewall-rules create www-firewall –allow tcp:80

gcloud compute forwarding-rules create nginx-lb \
  --region us-central1 \
  --ports=80 \
  --target-pool nginx-pool

gcloud compute forwarding-rules list

gcloud compute http-health-checks create http-basic-check

gcloud compute instance-groups managed \
  set-named-ports nginx-group \
  --named-ports http:80

gcloud compute backend-services create nginx-backend \
  --protocol HTTP --http-health-checks http-basic-check --global

gcloud compute url-maps create web-map \
  --default-service nginx-backend

gsutil mb gs://<BUCKET_NAME>

echo INFRACLASS_PROJECT_ID=$INFRACLASS_PROJECT_ID >> ~/infraclass/config

source infraclass/config
echo $INFRACLASS_PROJECT_ID

export Project1=sdjfalkjdfasljfaö

echo $Project1

2

Monitoring Agent

curl -sSO https://dl.google.com/cloudagents/add-monitoring-agent-repo.sh
sudo bash add-monitoring-agent-repo.sh
sudo apt-get update
sudo apt-cache madison stackdriver-agent
sudo apt-get install -y 'stackdriver-agent=6.*'
sudo service stackdriver-agent start
sudo service stackdriver-agent status

Logging Agent


curl -sSO https://dl.google.com/cloudagents/add-logging-agent-repo.sh
sudo bash add-logging-agent-repo.sh
sudo apt-get update
sudo apt-cache madison google-fluentd
sudo apt-get install -y 'google-fluentd=1.*'
sudo apt-get install -y google-fluentd-catch-all-config
sudo service google-fluentd start
sudo service google-fluentd status

MySQL Agent

https://cloud.google.com/monitoring/agent/plugins/mysql

sed -i -e "s/ZONE/$MY_ZONE/" mydeploy.yaml

  # Deployment Manager config: a single Debian 9 VM ("my-vm") in
  # us-central1-a with an ephemeral external NAT IP on the default network.
  resources:
  - name: my-vm
    type: compute.v1.instance
    properties:
      zone: us-central1-a
      # Machine type is referenced by its zonal URL path.
      machineType: zones/us-central1-a/machineTypes/n1-standard-1
      metadata:
        items:
        # Startup script runs as root on first boot.
        - key: startup-script
          value: "apt-get update"
      disks:
      - deviceName: boot
        type: PERSISTENT
        boot: true
        # Boot disk is deleted together with the instance.
        autoDelete: true
        initializeParams:
          sourceImage: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-9-stretch-v20180806
      networkInterfaces:
      # NOTE(review): network URL is hard-coded to a specific lab project —
      # replace the project ID before reusing this template.
      - network: https://www.googleapis.com/compute/v1/projects/qwiklabs-gcp-dcdf854d278b50cd/global/networks/default
        accessConfigs:
        # ONE_TO_ONE_NAT assigns an ephemeral external IP.
        - name: External NAT
          type: ONE_TO_ONE_NAT

gcloud deployment-manager deployments create my-first-depl --config mydeploy.yaml

gcloud deployment-manager deployments update my-first-depl --config mydeploy.yaml

how to install it https://cloud.google.com/storage/docs/gsutil_install#deb

see rsync options https://cloud.google.com/storage/docs/gsutil/commands/rsync

gsutil rsync -r -d importedCSV gs://ak-test-imports

enter in any ssh

gcloud compute ssh nativelara

see ssh option on gcloud https://cloud.google.com/sdk/gcloud/reference/compute/ssh

add a large file to cloud storage via cloud console:
curl "sourcefile" | gsutil cp - gs://spclimatedata/"anyname"

On Ubuntu 20.04
https://github.com/tducasse/go-instabot

sudo apt install golang
go get github.com/tducasse/go-instabot
~/go/bin/hello

cd ~/go/src/github.com/tducasse/go-instabot
vim config/config.json
~/go/bin/go-instabot -run

Project site at http://www.dirvish.org/
https://github.com/dirvish/dirvish
http://www.dirvish.org/ssh.html

# Server in Backup aufnehmen

Jedes Backup hat ein eigenes LVM mit einem Mountpunkt unter
`/mnt/backup/{server}`

* Mit `vgdisplay` freien Speicherplatz prüfen (Punkt `Free  PE / Size`)
* Mountpunkt erstellen mit `mkdir /mnt/backup/{server}`
* Mit `lvcreate -L 100G -n {server} VG01_srsouthp08` ein neues logical
volume erstellen (die 100G sollten je nach System angepasst werden)
* Mit `mkfs.ext4 /dev/VG01_srsouthp08/{server}` neues Volume formatieren
* Mit `mount /dev/VG01_srsouthp08/{server} /mnt/backup/{server}` das
Volume mounten
* Einen entsprechenden Eintrag in der `fstab` vornehmen:
`/dev/mapper/VG01_srsouthp08-{server}   /mnt/backup/{server}    ext4
defaults,_netdev        0       0
`
* Den Ordner `/mnt/backup/{server}/dirvish` erstellen und darin die
`default.conf` anlegen.
* In der Datei `/etc/dirvish/backup-dirvish.conf` ganz Unten den Server
in die Variable `VAULTS` eintragen
* In der Datei `/etc/cron.d/adfinis-dirvish-backup` einen Cronjob für
den Server erstellen
* Erstes Backup mit `dirvish --vault {server} --init` erstellen.
Achtung!: Dies kann mehrere Stunden dauern und sollte deswegen in einem
`screen` laufen.

On the client add the SSH key, exclude huge archives, and make sure the database is in a state where the backup can be used, or that a dump of it exists.
Test with `dirvish --no-run --vault {server}`


https://docs.zammad.org/en/latest/appendix/backup-and-restore.html