|
| 1 | +#!/bin/bash |
1 | 2 | #
|
2 | 3 | # Openstack specific functions
|
3 | 4 | #
|
|
21 | 22 | #
|
22 | 23 | ################################################################
|
23 | 24 |
|
| 25 | + |
| 26 | +_get_field() { |
| 27 | + echo -n "$1"|perl -n -e "\$_ =~ /^\|\s+$2\s+\|\s+(\S*)\s+\|\$/ && print \$1" |
| 28 | +} |
| 29 | + |
_get_disk_state() {
    # Return the "status" field of cinder volume $1 (e.g. "in-use",
    # "available", "detaching").
    # Fix: OUT is a scratch variable reused by callers (e.g. the attach
    # loop); keep it local so polling the state cannot clobber it.
    local OUT
    OUT=$($CINDERCLIENT show "$1")
    _get_field "$OUT" status
}
| 34 | + |
cinder_volume2id() {
    # Resolve a cinder volume name ($1) to its UUID via "cinder show".
    # Fix: quote the name (volume names may contain spaces) and keep the
    # scratch variables local.
    local VM_VOL_NAME="$1"
    local OUT
    OUT=$($CINDERCLIENT show "$VM_VOL_NAME")
    _get_field "$OUT" id
}
| 40 | + |
cloud_volume_attach_openstack() {
    # Attach the cinder volume named $2 to the nova server $1 and wait
    # until cinder reports it "in-use".
    #
    # Prints "<volume-id>:<server-id>:<device-path>", e.g.
    #   793e9a04-7068-4cf1-86e7-26509f709b54:175e470c-5869-4425-988a-6b334a2fa655:/dev/vdb
    # Returns 3 when the attach fails or the volume state cannot be read.
    local VM_SERVER="$1"
    local VM_VOL_NAME="$2"
    local VM_VOL_ID OUT rc state device_path serverId

    VM_VOL_ID=$(cinder_volume2id "$VM_VOL_NAME")

    # Example output of "nova volume-attach":
    # +----------+--------------------------------------+
    # | Property | Value                                |
    # +----------+--------------------------------------+
    # | device   | /dev/vdb                             |
    # | id       | 793e9a04-7068-4cf1-86e7-26509f709b54 |
    # | serverId | 175e470c-5869-4425-988a-6b334a2fa655 |
    # | volumeId | 793e9a04-7068-4cf1-86e7-26509f709b54 |
    # +----------+--------------------------------------+
    OUT=$($NOVACLIENT volume-attach "$VM_SERVER" "$VM_VOL_ID")
    rc=$?
    # Fix: capture the client's exit code before testing it, so the error
    # message reports the real status instead of the status of "[".
    if [ "$rc" -gt 0 ] ; then
        echo "ERROR: nova attach failed. $rc" >&2
        return 3
    fi
    device_path=$(_get_field "$OUT" device)
    serverId=$(_get_field "$OUT" serverId)

    while true; do
        state=$(_get_disk_state "$VM_VOL_NAME")
        test "$state" = "in-use" && break
        if test -z "$state" ; then
            echo "ERROR: unable to find state of volume $VM_VOL_NAME" >&2
            return 3
        fi
        if test "$state" = available ; then
            # The attach silently did not stick; try again.
            echo "WARNING: volume $VM_VOL_NAME got not attached, retrying" >&2
            OUT=$($NOVACLIENT volume-attach "$VM_SERVER" "$VM_VOL_ID")
            rc=$?
            if [ "$rc" -gt 0 ] ; then
                echo "ERROR: nova attach failed. $rc" >&2
                return 3
            fi
            # Fix: re-read both fields after a retry - the kernel may hand
            # out a different device path on the second attach.
            device_path=$(_get_field "$OUT" device)
            serverId=$(_get_field "$OUT" serverId)
        fi
        sleep 3
    done

    echo "$VM_VOL_ID:$serverId:$device_path"
}
|
56 | 88 |
|
cloud_volume_detach_openstack() {
    # Detach a volume previously attached by cloud_volume_attach_openstack.
    #   $1 - "<volume-id>:<server-id>[:<device>]" as printed by the attach
    #        helper, or "__not_attached__" when there is nothing to do.
    # Prints "__not_attached__" once the volume is released.
    # Returns 3 when nova refuses to detach.
    if [ "$1" = "__not_attached__" ];then
        echo "Skipping detach because device is not attached to any server"
        return 0
    fi

    # The ids are 36-character UUIDs separated by ":".
    local VM_SERVER=${1:37:36}
    local VM_VOL_ID=${1:0:36}
    local state

    if [ -z "$VM_SERVER" -o -z "$VM_VOL_ID" ];then
        # Cannot proceed anyway - guess volume never got attached
        echo "__not_attached__"
        return 0
    fi

    # First, best-effort detach; the state poll below decides whether a
    # second, strictly-checked attempt is required.
    $NOVACLIENT volume-detach "$VM_SERVER" "$VM_VOL_ID"

    state=$(_get_disk_state "$VM_VOL_ID")
    while [ "$state" = detaching ];do
        sleep 1
        state=$(_get_disk_state "$VM_VOL_ID")
    done

    if [ "$state" = "available" ];then
        echo "__not_attached__"
        return 0
    fi
    # umount seems not to be enough
    sync

    if ! $NOVACLIENT volume-detach "$VM_SERVER" "$VM_VOL_ID"; then
        echo "ERROR: nova detach of $VM_VOL_ID failed." >&2
        return 3
    fi
    while [ "$state" != "available" ]; do
        state=$(_get_disk_state "$VM_VOL_ID")
        sleep 3
    done
    return 0
}
|
74 | 133 |
|
vm_verify_options_openstack() {
    # Verify required client tools and options for the openstack VM type,
    # then remap the generic VM_* settings onto cinder volume names and
    # mark the device variables as not yet attached.

    # Checking for required tools (nova and cinder)

    NOVACLIENT=`type -p nova`
    if test -z "$NOVACLIENT" ; then
        cleanup_and_exit 3 "ERROR: nova not installed. Please install nova and try again"
    fi

    CINDERCLIENT=`type -p cinder`
    if test -z "$CINDERCLIENT" ; then
        # Fix: this message used to claim "nova not installed".
        cleanup_and_exit 3 "ERROR: cinder not installed. Please install cinder and try again"
    fi

    # verify options

    if test -z "$OS_AUTH_URL" ; then
        cleanup_and_exit 3 "ERROR: No openstack environment set. This vm-type works only inside of an openstack VM."
    fi
    if test -z "$VM_KERNEL" ; then
        # Fix: message was a copy-paste of the root volume error.
        cleanup_and_exit 3 "ERROR: No worker kernel/grub VM volume name specified."
    fi
    if test -z "$VM_IMAGE" ; then
        cleanup_and_exit 3 "ERROR: No worker root VM volume name specified."
    fi
    if test -z "$VM_SWAP" ; then
        cleanup_and_exit 3 "ERROR: No worker swap VM volume name specified."
    fi
    if test -z "$VM_SERVER" ; then
        cleanup_and_exit 3 "ERROR: No VM server node name specified (usually this instance)."
    fi
    if test -z "$VM_WORKER" ; then
        cleanup_and_exit 3 "ERROR: No VM worker node name specified (the instance to be created)."
    fi
    if test -z "$VM_OS_FLAVOR" ; then
        cleanup_and_exit 3 "ERROR: No VM openstack flavor set (--os-flavor <FLAVOR-NAME|FLAVOR-ID>)."
    fi

    # set default values

    VM_ROOTDEV="LABEL=obsrootfs"
    VM_SWAPDEV="LABEL=obsswapfs"

    qemu_rootdev=/dev/vda

    # The VM_KERNEL/VM_IMAGE/VM_SWAP options carried cinder volume names;
    # remember them under VM_VOLUME_* and flag the devices as unattached.
    VM_VOLUME_GRUB="$VM_KERNEL"
    VM_KERNEL=__not_attached__
    VM_VOLUME_NAME="$VM_IMAGE"
    VM_IMAGE=__not_attached__
    VM_VOLUME_SWAP="$VM_SWAP"
    VM_SWAP=__not_attached__
}
|
96 | 188 |
|
vm_attach_root_openstack() {
    # Attach the worker root volume and publish its device path in
    # VM_IMAGE; keep "<volume-id>:<server-id>" for the later detach.
    local TMP
    TMP=$(cloud_volume_attach_openstack "$VM_SERVER" "$VM_VOLUME_NAME")
    VM_IMAGE=$(echo "$TMP" | cut -d: -f3)
    test "${VM_IMAGE:0:5}" = "/dev/" || cleanup_and_exit 3
    # Fix: strip the trailing ":<device>" instead of hard-coding the
    # 73-character offset (two 36-char UUIDs plus separator).
    VM_IMAGE_ATTACH_INFO=${TMP%:*}
}
|
101 | 195 |
|
vm_attach_swap_openstack() {
    # Attach the worker swap volume and publish its device path in
    # VM_SWAP; keep "<volume-id>:<server-id>" for the later detach.
    local TMP
    TMP=$(cloud_volume_attach_openstack "$VM_SERVER" "$VM_VOLUME_SWAP")
    VM_SWAP=$(echo "$TMP" | cut -d: -f3)
    test "${VM_SWAP:0:5}" = "/dev/" || cleanup_and_exit 3
    # Fix: strip the trailing ":<device>" instead of hard-coding the
    # 73-character offset (two 36-char UUIDs plus separator).
    VM_SWAP_ATTACH_INFO=${TMP%:*}
}
|
106 | 202 |
|
vm_detach_root_openstack() {
    # Detach the worker root volume and reset the tracking variables.
    # Fix: the helper's stdout was captured into VM_IMAGE only to be
    # overwritten on the next line; discard it explicitly instead.
    cloud_volume_detach_openstack "$VM_IMAGE_ATTACH_INFO" >/dev/null
    VM_IMAGE_ATTACH_INFO=__not_attached__
    VM_IMAGE=__not_attached__
}
|
110 | 208 |
|
vm_detach_swap_openstack() {
    # Detach the worker swap volume and reset the tracking variables.
    # Fix: the helper's stdout was captured into VM_SWAP only to be
    # overwritten on the next line; discard it explicitly instead.
    cloud_volume_detach_openstack "$VM_SWAP_ATTACH_INFO" >/dev/null
    VM_SWAP_ATTACH_INFO=__not_attached__
    VM_SWAP=__not_attached__
}
|
114 | 214 |
|
vm_cleanup_openstack() {
    # Release every volume the worker may still hold, root first.
    local step
    for step in vm_detach_root_openstack vm_detach_swap_openstack; do
        "$step"
    done
}
|
119 | 219 |
|
vm_fixup_openstack() {
    # No way to handle this via init= parameter here....
    # Install a /sbin/init inside the build root that announces and runs
    # the build, then blocks on read (keeps the console session open).
    cat > "$BUILD_ROOT/sbin/init" <<'EOF'
#!/bin/sh
echo "exec /.build/build \"$@\""
exec /.build/build "$@"
echo "Waiting for input"
read
EOF
    chmod 0755 "$BUILD_ROOT/sbin/init"
}
|
126 | 229 |
|
vm_wipe_openstack() {
    # Remove the worker instance; a no-op when no worker name is set.
    # Fix: quote $VM_WORKER so names with shell metacharacters are safe.
    if [ -n "$VM_WORKER" ];then
        $NOVACLIENT delete "$VM_WORKER"
    fi
}
|
130 | 235 |
|
vm_kill_openstack() {
    # Kill the worker instance if nova still knows about it.
    if $NOVACLIENT show "$VM_WORKER" >/dev/null 2>&1 ; then
        if ! $NOVACLIENT delete "$VM_WORKER" ; then
            # Fix: report the instance actually being deleted (VM_WORKER),
            # not the stale VM_VOLUME_NAME variable.
            cleanup_and_exit 1 "could not kill openstack vm build $VM_WORKER"
        fi
    fi
}
|
wait_for_delete_instance() {
    # Poll nova until the worker no longer appears in the instance list
    # (deletion is asynchronous).
    # Fix: single list call per iteration, quoted grep pattern, and no
    # redundant sleep once the instance is already gone.
    while $NOVACLIENT list | grep -q -- "$VM_WORKER"; do
        sleep 1
    done
}
138 | 250 |
|
vm_startup_openstack() {
    # Boot the worker instance from the prepared cinder volumes (grub,
    # root, swap), wait for its serial console websocket, attach the
    # console and tear the instance down when the session ends.
    local VM_VOL_ROOT_ID VM_VOL_SWAP_ID VM_VOL_BOOT_ID OUTPUT WS_URL VM_BUILD_ID

    VM_VOL_ROOT_ID=$(cinder_volume2id "$VM_VOLUME_NAME")
    VM_VOL_SWAP_ID=$(cinder_volume2id "$VM_VOLUME_SWAP")
    VM_VOL_BOOT_ID=$(cinder_volume2id "$VM_VOLUME_GRUB")

    # Fix: the error check must sit outside the command substitution -
    # a cleanup_and_exit inside the backticks only killed the subshell,
    # so a failed boot went unnoticed.
    OUTPUT=$($NOVACLIENT boot \
        --flavor "$VM_OS_FLAVOR" \
        --block-device "source=volume,dest=volume,bootindex=0,id=${VM_VOL_BOOT_ID}" \
        --block-device "source=volume,dest=volume,bootindex=1,id=${VM_VOL_ROOT_ID}" \
        --block-device "source=volume,dest=volume,bootindex=2,id=${VM_VOL_SWAP_ID}" \
        --poll "$VM_WORKER") || cleanup_and_exit 3

    # The serial console websocket may not be available immediately.
    # Fix: COUNTER was used uninitialized, which made the test expression
    # a syntax error; default to a 60-attempt budget.
    COUNTER=${COUNTER:-60}
    WS_URL=$($NOVACLIENT get-serial-console "$VM_WORKER" | grep serial | perl -p -e 's#.*(ws://.*) \|#$1#')
    while [ -z "$WS_URL" -a "$COUNTER" -gt 0 ];do
        sleep 1
        COUNTER=$(($COUNTER - 1))
        WS_URL=$($NOVACLIENT get-serial-console "$VM_WORKER" | grep serial | perl -p -e 's#.*(ws://.*) \|#$1#')
    done

    VM_BUILD_ID=$(_get_field "$OUTPUT" id)

    # The instance is deleted on both paths; on console failure we also
    # abort the build, otherwise we wait for the deletion to finish.
    if ! $BUILD_DIR/openstack-console "${WS_URL}";then
        $NOVACLIENT delete "$VM_BUILD_ID"
        cleanup_and_exit 3
    else
        $NOVACLIENT delete "$VM_BUILD_ID"
        wait_for_delete_instance
    fi
}
|
| 282 | + |
0 commit comments