-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsetup-bdc.sh
More file actions
342 lines (265 loc) · 9.28 KB
/
setup-bdc.sh
File metadata and controls
342 lines (265 loc) · 9.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
#!/bin/bash
# Bootstrap a single-node Kubernetes cluster and deploy a SQL Server
# Big Data Cluster (BDC) on it. Must be run as root (e.g. via sudo).
set -Eeuo pipefail

# Every step below (apt, kubeadm, sysctl, mount, ...) needs root.
# Fix: the original used a bare 'exit', which returned status 0 and made
# the failure invisible to callers; exit 1 and write the message to stderr.
if [ "$EUID" -ne 0 ]; then
  echo "Please run as root" >&2
  exit 1
fi

# Working directory (relative to $PWD) for deployment scripts and logs.
export BDCDEPLOY_DIR=bdcdeploy
# Get password as input. It is used as default for controller, SQL Server
# Master instance (sa account) and Knox. Loop until both entries match.
# -s suppresses terminal echo; -r (fix) keeps backslashes literal so they
# survive in the password instead of being interpreted as escape characters.
#
while true; do
  read -r -s -p "Create Password for Big Data Cluster: " password
  echo
  read -r -s -p "Confirm your Password: " password2
  echo
  [ "$password" = "$password2" ] && break
  echo "Password mismatch. Please try again."
done
# Name of the Python virtualenv that will host the azdata CLI.
#
export VIRTUALENV_NAME="bdcvenv"
# All deployment output is duplicated into this log file (created in
# $BDCDEPLOY_DIR below).
export LOG_FILE="bdcdeploy.log"
# Keep apt from prompting during unattended package installs.
export DEBIAN_FRONTEND=noninteractive
# Requirements file used to pip-install azdata.
#
export REQUIREMENTS_LINK="https://aka.ms/azdata"
# Kubernetes version: apt package pin and kubeadm release.
#
KUBE_DPKG_VERSION=1.15.0-00
KUBE_VERSION=1.15.0
# Wait up to 10 minutes (600s, polling every 5s) for the cluster node to
# become Ready. (Original comment said 5 minutes; 600s is 10.)
#
TIMEOUT=600
RETRY_INTERVAL=5
# Registry coordinates of the BDC container images to pull.
#
export DOCKER_REGISTRY="mcr.microsoft.com"
export DOCKER_REPOSITORY="mssql/bdc"
export DOCKER_TAG="2019-CU4-ubuntu-16.04"
# Variables consumed by 'azdata bdc create'.
#
export AZDATA_USERNAME=admin
# Quote the expansion (fix) so passwords containing spaces or glob
# characters survive intact.
export AZDATA_PASSWORD="$password"
export ACCEPT_EULA=yes
export CLUSTER_NAME=mssql-cluster
export STORAGE_CLASS=local-storage
export PV_COUNT="50"
# Every container image in this BDC release; each is later pulled as
# $DOCKER_REGISTRY/$DOCKER_REPOSITORY/<image>:$DOCKER_TAG.
IMAGES=()
IMAGES+=(mssql-app-service-proxy mssql-control-watchdog mssql-controller)
IMAGES+=(mssql-dns mssql-hadoop mssql-mleap-serving-runtime)
IMAGES+=(mssql-mlserver-py-runtime mssql-mlserver-r-runtime)
IMAGES+=(mssql-monitor-collectd mssql-monitor-elasticsearch)
IMAGES+=(mssql-monitor-fluentbit mssql-monitor-grafana)
IMAGES+=(mssql-monitor-influxdb mssql-monitor-kibana mssql-monitor-telegraf)
IMAGES+=(mssql-security-domainctl mssql-security-knox mssql-security-support)
IMAGES+=(mssql-server mssql-server-controller mssql-server-data)
IMAGES+=(mssql-ha-operator mssql-ha-supervisor)
IMAGES+=(mssql-service-proxy mssql-ssis-app-runtime)
# Make a directory for installing the scripts and logs, and run the rest
# of the deployment from inside it. Expansions are quoted (fix) so the
# paths survive if the variables ever contain spaces; cd is guarded
# explicitly rather than relying only on 'set -e'.
#
mkdir -p "$BDCDEPLOY_DIR"
cd "$BDCDEPLOY_DIR" || exit 1
touch "$LOG_FILE"
{
# Install all necessary packages: kubernetes, docker, python3, python3-pip,
# requests, azdata. Everything in this brace group is tee'd to $LOG_FILE.
#
echo ""
echo "######################################################################################"
echo "Starting installing packages..."
# Install docker from Docker's own apt repository. Use apt-get throughout
# (fix: the original mixed 'apt' and 'apt-get'; 'apt' warns it has no
# stable CLI for scripts). 'sudo' is dropped: the script already runs as
# root and sudo may be absent on a minimal image.
#
apt-get update -q
apt-get --yes install \
    software-properties-common \
    apt-transport-https \
    ca-certificates \
    curl
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update -q
# Pin the docker build BDC was validated against and hold it so routine
# upgrades cannot move it.
apt-get install -q --yes docker-ce=18.06.2~ce~3-0~ubuntu --allow-downgrades
apt-mark hold docker-ce
# Let the invoking user run docker without root. Fix: under sudo, $USER is
# 'root', so the original added root (a no-op) instead of the real caller;
# prefer $SUDO_USER when available.
usermod --append --groups docker "${SUDO_USER:-$USER}"
# Install python3, python3-pip, requests, plus the native build
# prerequisites (Kerberos, SQLite, ODBC headers) that azdata's pip
# dependencies compile against.
#
apt-get install -q -y python3 python3-pip
apt-get install -y libkrb5-dev libsqlite3-dev unixodbc-dev
pip3 install --upgrade requests
# Install virtualenv and create an isolated environment so azdata's pinned
# dependencies cannot conflict with system Python packages.
#
pip3 install --upgrade virtualenv
virtualenv -p python3 "$VIRTUALENV_NAME"
# shellcheck disable=SC1090 -- activation path is computed at run time.
source "$VIRTUALENV_NAME/bin/activate"
# Install the azdata CLI from the published requirements file.
#
pip3 install -r "$REQUIREMENTS_LINK"
echo "Packages installed."
# Load all pre-requisites for Kubernetes.
#
echo "###########################################################################"
echo "Starting to setup pre-requisites for kubernetes..."
# Make the hostname resolvable locally and disable swap; kubelet refuses
# to start with swap enabled.
#
echo "$(hostname -i) $(hostname)" >> /etc/hosts
swapoff -a
# Comment out any swap entries so swap stays off across reboots.
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
# Register the Kubernetes apt repository. Fix: use -fsSL (matching the
# docker key fetch above) so an HTTP error page is never silently piped
# into apt-key.
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
# Packages that allow apt to use a repository over HTTPS, plus networking
# tools kubeadm expects.
#
apt-get update -q
apt-get install -q -y ebtables ethtool
apt-get install -q -y apt-transport-https
# Configure the docker daemon: systemd cgroup driver (required by kubeadm),
# bounded json-file logs, and the overlay2 storage driver.
# Guard with mkdir -p in case /etc/docker does not exist yet.
#
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Restart docker so the new daemon configuration takes effect.
#
systemctl daemon-reload
systemctl restart docker
# Install the pinned Kubernetes packages and hold them so apt upgrades
# cannot move the cluster off the validated version.
#
apt-get install -q -y kubelet="$KUBE_DPKG_VERSION" kubeadm="$KUBE_DPKG_VERSION" kubectl="$KUBE_DPKG_VERSION"
apt-mark hold kubelet kubeadm kubectl
# Install helm via its bootstrap script.
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
# br_netfilter is not auto-loaded on bionic but is required so bridged pod
# traffic traverses iptables.
. /etc/os-release
if [ "$UBUNTU_CODENAME" == "bionic" ]; then
modprobe br_netfilter
fi
# Disable IPv6 for cluster endpoints, both immediately (sysctl -w) and
# persistently. FIX: the original used '>' three times in a row, which
# truncated /etc/sysctl.conf on each write — destroying its prior contents
# and persisting only the last setting. Append all three instead.
# ('sudo' dropped: the script already runs as root.)
#
sysctl -w net.ipv6.conf.all.disable_ipv6=1
sysctl -w net.ipv6.conf.default.disable_ipv6=1
sysctl -w net.ipv6.conf.lo.disable_ipv6=1
{
echo net.ipv6.conf.all.disable_ipv6=1
echo net.ipv6.conf.default.disable_ipv6=1
echo net.ipv6.conf.lo.disable_ipv6=1
} >> /etc/sysctl.conf
sysctl net.bridge.bridge-nf-call-iptables=1
# Setting up the persistent volumes for kubernetes. The self bind-mount
# gives each directory its own mount point — presumably required by the
# local-storage provisioner applied later to treat each as a discrete
# volume (NOTE(review): confirm against the provisioner's docs).
# Expansions are quoted (fix) for safety.
#
for i in $(seq 1 "$PV_COUNT"); do
vol="vol$i"
mkdir -p "/local-storage/$vol"
mount --bind "/local-storage/$vol" "/local-storage/$vol"
done
echo "Kubernetes pre-requisites have been completed."
# Setup kubernetes cluster including remove taint on master.
#
echo ""
echo "#############################################################################"
echo "Starting to setup Kubernetes master..."
# Initialize a single-node cluster on this host. The pod CIDR must match
# the flannel manifest applied below. ('sudo' dropped: already root.)
#
kubeadm init --pod-network-cidr=10.244.0.0/16 --kubernetes-version="$KUBE_VERSION"
# Install the admin kubeconfig for root AND for the invoking sudo user.
# Fixes: the original created /home/$SUDO_USER/.kube but never copied a
# config into it (leaving kubectl unusable for that user), chowned root's
# own config away, and dereferenced $SUDO_USER unguarded — which aborts
# under 'set -u' when the script is run as root without sudo.
#
mkdir -p "$HOME/.kube"
cp -f /etc/kubernetes/admin.conf "$HOME/.kube/config"
if [ -n "${SUDO_USER:-}" ]; then
mkdir -p "/home/$SUDO_USER/.kube"
cp -f /etc/kubernetes/admin.conf "/home/$SUDO_USER/.kube/config"
chown "$(id -u "$SUDO_USER")":"$(id -g "$SUDO_USER")" "/home/$SUDO_USER/.kube/config"
fi
# To enable a single node cluster remove the taint that limits the first
# node to master only service.
#
master_node=$(kubectl get nodes --no-headers=true --output=custom-columns=NAME:.metadata.name)
kubectl taint nodes "${master_node}" node-role.kubernetes.io/master:NoSchedule-
# Local storage provisioning.
#
kubectl apply -f https://raw.githubusercontent.com/rl-msft/bdc-ubuntu-single-node-vm/master/local-storage-provisioner.yaml
# Install the software defined network (flannel).
#
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# RBAC rules required by the BDC deployment.
kubectl apply -f https://raw.githubusercontent.com/microsoft/sql-server-samples/master/samples/features/sql-big-data-cluster/deployment/kubeadm/ubuntu/rbac.yaml
# Verify that the cluster is ready to be used: poll the node's STATUS
# column (single node, so one row) every $RETRY_INTERVAL seconds until it
# reports 'Ready' or the $TIMEOUT budget is exhausted.
# Backticks replaced with $() and expansions quoted (idiom fix).
#
echo "Verifying that the cluster is ready for use..."
while true; do
if [[ "$TIMEOUT" -le 0 ]]; then
echo "Cluster node failed to reach the 'Ready' state. Kubeadm setup failed."
exit 1
fi
status=$(kubectl get nodes --no-headers=true | awk '{print $2}')
if [ "$status" == "Ready" ]; then
break
fi
sleep "$RETRY_INTERVAL"
TIMEOUT=$((TIMEOUT - RETRY_INTERVAL))
echo "Cluster not ready. Retrying..."
done
# Install the dashboard for Kubernetes and grant it cluster-admin so it
# can display resources in all namespaces.
#
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
kubectl create clusterrolebinding kubernetes-dashboard --clusterrole=cluster-admin --serviceaccount=kube-system:kubernetes-dashboard
echo "Kubernetes master setup done."
# Pre-pull the SQL Server BDC images so 'azdata bdc create' does not stall
# waiting on large downloads. Image references are quoted (fix) and the
# echo strings are assembled inside one quoted string instead of relying
# on word-splitting.
#
echo ""
echo "############################################################################"
echo "Starting to pull docker images..."
echo "Pulling images from repository: $DOCKER_REGISTRY/$DOCKER_REPOSITORY"
for image in "${IMAGES[@]}"; do
docker pull "$DOCKER_REGISTRY/$DOCKER_REPOSITORY/$image:$DOCKER_TAG"
echo "Docker image $image pulled."
done
echo "Docker images pulled."
# Deploy azdata bdc create cluster.
#
echo ""
echo "############################################################################"
echo "Starting to deploy azdata cluster..."
# Generate a custom deployment profile from the kubeadm-dev-test template,
# patch it to point at the registry/repository/tag pulled above and at the
# local-storage class created earlier, then create the cluster.
# NOTE(review): the -j JSON paths below are written three different ways
# (".spec...", "$.spec..." and bare "spec..."); they appear to be accepted
# interchangeably, but confirm against the azdata docs and normalize to a
# single form ("$.spec...") if possible.
#
azdata bdc config init --source kubeadm-dev-test --target kubeadm-custom -f
azdata bdc config replace -c kubeadm-custom/control.json -j ".spec.docker.repository=$DOCKER_REPOSITORY"
azdata bdc config replace -c kubeadm-custom/control.json -j ".spec.docker.registry=$DOCKER_REGISTRY"
azdata bdc config replace -c kubeadm-custom/control.json -j ".spec.docker.imageTag=$DOCKER_TAG"
# Single-node cluster: run only one data pool replica.
azdata bdc config replace -c kubeadm-custom/bdc.json -j "$.spec.resources.data-0.spec.replicas=1"
azdata bdc config replace -c kubeadm-custom/control.json -j "spec.storage.data.className=$STORAGE_CLASS"
azdata bdc config replace -c kubeadm-custom/control.json -j "spec.storage.logs.className=$STORAGE_CLASS"
azdata bdc create -c kubeadm-custom --accept-eula $ACCEPT_EULA
echo "Big data cluster created."
# Point kubectl at the BDC namespace by default.
#
kubectl config set-context --current --namespace "$CLUSTER_NAME"
# Login and get endpoint list for the cluster (AZDATA_USERNAME and
# AZDATA_PASSWORD are read from the environment set earlier).
#
azdata login -n "$CLUSTER_NAME"
azdata bdc endpoint list --output table
# Hand ownership of the azdata state to the invoking sudo user so azdata
# can be run without root afterwards. Fixes: 'sudo' dropped (already
# root) and $SUDO_USER guarded so 'set -u' does not abort when the script
# is run as root directly (falls back to root, a no-op chown).
# NOTE(review): this chowns root's own ~/.azdata to the sudo user —
# presumably intentional; confirm the invoking user is meant to reuse it.
if [ -d "$HOME/.azdata/" ]; then
chown -R "$(id -u "${SUDO_USER:-root}")":"$(id -g "${SUDO_USER:-root}")" "$HOME/.azdata/"
fi
# Make the virtualenv's azdata reachable from future interactive shells.
echo "alias azdata='$BDCDEPLOY_DIR/$VIRTUALENV_NAME/bin/azdata'" >> "$HOME/.bashrc"
} | tee "$LOG_FILE"