# Taskfile.yml
version: '2'
env:
  KUBECONFIG: PATH_TO_KUBECONFIG
  HELM_VERSION: helm_2_14_3
  TILLER_VERSION: v2.14.1
  HELM_RELEASE: scape-goat
  HELM_CHART: scape-goat
  NAMESPACE: kube-loader
  SSH_USER: ec2-user
  NODE_IP: 10.134.116.67
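
# Example invocations (a sketch, assuming the go-task CLI is installed as `task`
# and the env values above have been adjusted for the target cluster and node):
#   task bootstrap_node   # install EPEL and stress-ng on the node
#   task bootstrap        # create the namespace, RBAC binding and Tiller
#   task deploy           # install or upgrade the Helm release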
tasks:
  bootstrap_node:
    cmds:
      # -y keeps yum non-interactive, since these commands run over ssh without a tty
      - ssh ${SSH_USER}@${NODE_IP} sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
      - ssh ${SSH_USER}@${NODE_IP} sudo yum install -y stress-ng
  bootstrap:
    # By default, tasks are executed in the directory where the Taskfile is located,
    # but you can make a task run in another folder by setting dir:
    # dir: change/directory
    cmds:
      - echo $KUBECONFIG
      - cmd: kubectl create namespace ${NAMESPACE}
        ignore_error: true
      - cmd: kubectl create clusterrolebinding ${NAMESPACE}-admin --clusterrole=cluster-admin --serviceaccount=${NAMESPACE}:default
        ignore_error: true
      - ${HELM_VERSION} init --tiller-image gcr.io/kubernetes-helm/tiller:${TILLER_VERSION} --tiller-namespace ${NAMESPACE}
      - sleep 30s
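  # Instead of the fixed 30s sleep, a possible (untested) alternative is to wait for
  # the Tiller deployment created by helm init to become available, e.g.:
  #   kubectl rollout status deployment/tiller-deploy -n ${NAMESPACE} --timeout=120s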
  deploy:
    cmds:
      - ${HELM_VERSION} upgrade --install ${HELM_RELEASE} ./charts/${HELM_CHART} --tiller-namespace ${NAMESPACE} --namespace ${NAMESPACE}
  delete:
    cmds:
      - ${HELM_VERSION} delete --purge ${HELM_RELEASE} --tiller-namespace ${NAMESPACE}
  load_cpu_node:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo stress-ng --cpu 6 --timeout 300s --metrics-brief
  load_memory_node:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo stress-ng --vm 2 --vm-bytes 13G --timeout 600s
  load_disk_node:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo rm -rf /root/file
      - ssh ${SSH_USER}@${NODE_IP} sudo dd if=/dev/zero of=/root/file bs=1000M count=85
  cleanup_disk_node:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo rm -rf /root/file
  stop_docker:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo systemctl stop docker.service
  start_docker:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo systemctl start docker.service
  start_kubelet:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo systemctl start kubelet.service
  stop_kubelet:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo systemctl stop kubelet.service
  status_kubelet:
    cmds:
      - ssh ${SSH_USER}@${NODE_IP} sudo systemctl status kubelet.service
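  # After stopping or starting kubelet/docker, the node's condition can be observed
  # from the control plane (an optional check, not part of the original tasks):
  #   kubectl get nodes -o wide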
  load_cpu:
    cmds:
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- cat /dev/urandom | gzip -9 > /dev/null
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- free
  clear_memory:
    cmds:
      - kubectl exec $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- rm -rf /data/file
      - kubectl exec $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- free
  load_memory:
    cmds:
      - kubectl exec $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- rm -rf /data/file
      - kubectl exec $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- dd if=/dev/zero of=/data/file bs=1M count=256
  increase_replicas:
    cmds:
      - kubectl scale --replicas=3 deployment/${HELM_RELEASE} --namespace ${NAMESPACE}
      - kubectl get deployments -n ${NAMESPACE} ${HELM_RELEASE}
  load_disk:
    cmds:
      - kubectl exec $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- rm -rf /data/file
      - cmd: kubectl exec $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- dd if=/dev/zero of=/data/file bs=1M count=1900
        ignore_error: true
      - kubectl exec $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- df -h | grep /data
  disk_usage:
    cmds:
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- df -h | grep /data
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- df -i | grep /data
  clean_disk:
    cmds:
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- rm -rf /data/file
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- df -h | grep /data
  clean_inodes:
    cmds:
      # -delete removes the .tmp files created by load_inodes instead of only listing them
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- find /data -name '*.tmp' -delete
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- df -i | grep /data
  load_inodes:
    cmds:
      - kubectl exec -t $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- ./scripts/load_inodes.sh
      - kubectl exec $(kubectl get pods -n ${NAMESPACE} | grep -m1 ${HELM_RELEASE}|awk '{ print $1 }') -n ${NAMESPACE} -- df -i /data
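
# load_inodes runs ./scripts/load_inodes.sh inside the pod; the script itself is not
# part of this file. A minimal sketch of such a script (assumed, not the actual one)
# would create many empty .tmp files under /data to exhaust inodes:
#   #!/bin/sh
#   # create 100000 empty files; each consumes one inode on the /data volume
#   i=1
#   while [ "$i" -le 100000 ]; do
#     touch "/data/inode_${i}.tmp"
#     i=$((i + 1))
#   done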