| 1 | +#! /usr/bin/env bash |
| 2 | + |
| 3 | +ACCESS_TOKEN_URI="https://oauth2.googleapis.com/token" |
| 4 | +BUCKET_NAME="agoric-snapshots-public" |
| 5 | +CONTAINER_MEMORY="200GiB" |
| 6 | +IMAGE_TAG="${IMAGE_TAG:-"57"}" |
| 7 | +FIRST_NODE_DATA_FOLDER_NAME="agoric1" |
| 8 | +FIRST_NODE_IP="10.99.0.2" |
| 9 | +PARENT_FOLDER="/var/lib/google/code" |
| 10 | +PROJECT_NAME="simulationlab" |
| 11 | +REGION="us-central1-a" |
| 12 | +REPOSITORY_URL="https://github.com/agoric-labs/cosmos-genesis-tinkerer.git" |
| 13 | +STORAGE_WRITE_SCOPES="https://www.googleapis.com/auth/devstorage.read_write" |
| 14 | +SECOND_NODE_DATA_FOLDER_NAME="agoric2" |
| 15 | +SECOND_NODE_IP="10.99.0.3" |
| 16 | +STORAGE_UPLOAD_URL="https://storage.googleapis.com/upload/storage/v1/b" |
| 17 | +TIMESTAMP="$(date '+%s')" |
| 18 | +VALIDATOR_STATE_FILE_NAME="priv_validator_state.json" |
| 19 | +VM_NAME="jump-3" |
| 20 | +VM_OPERATIONS_SCOPES="https://www.googleapis.com/auth/cloud-platform" |
| 21 | +VM_RUNNING_STATUS="RUNNING" |
| 22 | +VM_STOPPED_STATUS="TERMINATED" |
| 23 | + |
| 24 | +FIRST_NODE_LOGS_FILE="/tmp/$FIRST_NODE_DATA_FOLDER_NAME.logs" |
| 25 | +IMAGE_NAME="ghcr.io/agoric/agoric-sdk:$IMAGE_TAG" |
| 26 | +LOGS_FILE="/tmp/$TIMESTAMP.logs" |
| 27 | +REPOSITORY_FOLDER_NAME="tinkerer_$TIMESTAMP" |
| 28 | +SECOND_NODE_LOGS_FILE="/tmp/$SECOND_NODE_DATA_FOLDER_NAME.logs" |
| 29 | +STORAGE_WRITE_SERVICE_ACCOUNT_JSON_FILE_PATH="$PARENT_FOLDER/chain-snapshot-writer.json" |
| 30 | +VM_ADMIN_SERVICE_ACCOUNT_JSON_FILE_PATH="$PARENT_FOLDER/vm-admin.json" |
| 31 | + |
| 32 | +execute_command_inside_vm() { |
| 33 | + local command="$1" |
| 34 | + gcloud compute ssh "$VM_NAME" \ |
| 35 | + --command "$command" --project "$PROJECT_NAME" --zone "$REGION" |
| 36 | +} |
| 37 | + |
| 38 | +get_vm_status() { |
| 39 | + gcloud compute instances describe "$VM_NAME" \ |
| 40 | + --format "value(status)" --project "$PROJECT_NAME" --zone "$REGION" |
| 41 | +} |
| 42 | + |
| 43 | +log_warning() { |
| 44 | + printf "\033[33m%s\033[0m\n" "$1" |
| 45 | +} |
| 46 | + |
| 47 | +signal_vm_start() { |
| 48 | + gcloud compute instances start "$VM_NAME" \ |
| 49 | + --async --project "$PROJECT_NAME" --zone "$REGION" >/dev/null 2>&1 |
| 50 | +} |
| 51 | + |
| 52 | +start_vm() { |
| 53 | + if test "$(get_vm_status)" = "$VM_STOPPED_STATUS"; then |
| 54 | + log_warning "Starting VM $VM_NAME" |
| 55 | + signal_vm_start |
| 56 | + wait_for_vm_status "$VM_RUNNING_STATUS" |
| 57 | + else |
| 58 | + log_warning "VM $VM_NAME already running" |
| 59 | + fi |
| 60 | +} |
| 61 | + |
| 62 | +wait_for_vm_status() { |
| 63 | + local status="$1" |
| 64 | + while [ "$(get_vm_status)" != "$status" ]; do |
| 65 | + sleep 5 |
| 66 | + done |
| 67 | +} |
| 68 | + |
| 69 | +start_vm |
| 70 | +execute_command_inside_vm " |
| 71 | + #! /bin/bash |
| 72 | + |
| 73 | + set -o errexit -o errtrace |
| 74 | + |
| 75 | + clone_repository() { |
| 76 | + git clone $REPOSITORY_URL $REPOSITORY_FOLDER_NAME |
| 77 | + } |
| 78 | + compress_folders() { |
| 79 | + local first_chain_folder=state/mainfork/$FIRST_NODE_DATA_FOLDER_NAME |
| 80 | + local second_chain_folder=state/mainfork/$SECOND_NODE_DATA_FOLDER_NAME |
| 81 | + |
| 82 | + local folder_size=\$(sudo du --human-readable --null --summarize \$first_chain_folder | awk '{printf \"%s\", \$1}') |
| 83 | + |
| 84 | + sudo mv \$first_chain_folder/data/$VALIDATOR_STATE_FILE_NAME state/$VALIDATOR_STATE_FILE_NAME |
| 85 | + sudo chmod 666 state/$VALIDATOR_STATE_FILE_NAME |
| 86 | + |
| 87 | + echo \"Compressing data folder of size \$folder_size\" |
| 88 | + sudo tar --create --file state/mainfork_data_$TIMESTAMP.tar.gz --directory \$first_chain_folder --gzip data |
| 89 | + sudo tar --create --file state/mainfork_${FIRST_NODE_DATA_FOLDER_NAME}_config_$TIMESTAMP.tar.gz --directory \$first_chain_folder --gzip config |
| 90 | + sudo tar --create --file state/mainfork_${SECOND_NODE_DATA_FOLDER_NAME}_config_$TIMESTAMP.tar.gz --directory \$second_chain_folder --gzip config |
| 91 | + sudo tar --create --file state/keyring-test.tar.gz --directory \$first_chain_folder --gzip keyring-test |
| 92 | + |
| 93 | + sudo chmod 666 state/mainfork_data_$TIMESTAMP.tar.gz |
| 94 | + sudo chmod 666 state/mainfork_${FIRST_NODE_DATA_FOLDER_NAME}_config_$TIMESTAMP.tar.gz |
| 95 | + sudo chmod 666 state/mainfork_${SECOND_NODE_DATA_FOLDER_NAME}_config_$TIMESTAMP.tar.gz |
| 96 | + sudo chmod 666 state/keyring-test.tar.gz |
| 97 | + } |
| 98 | + create_log_files() { |
| 99 | + touch $FIRST_NODE_LOGS_FILE $SECOND_NODE_LOGS_FILE |
| 100 | + } |
| 101 | + export_genesis() { |
| 102 | + docker run \ |
| 103 | + --entrypoint /scripts/export.sh \ |
| 104 | + --volume $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME/scripts:/scripts:rw \ |
| 105 | + --volume $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME/state/mainnet/agoric:/root/agoric:rw \ |
| 106 | + $IMAGE_NAME |
| 107 | + } |
| 108 | + get_access_token() { |
| 109 | + local client_email |
| 110 | + local private_key |
| 111 | + local scopes |
| 112 | + local service_account_json_file_path |
| 113 | + |
| 114 | + scopes=\"\$1\" |
| 115 | + service_account_json_file_path=\"\$2\" |
| 116 | + |
| 117 | + private_key=\"\$(jq -r \".private_key\" \"\$service_account_json_file_path\" | sed \"s/\\\\n/\\n/g\")\" |
| 118 | + client_email=\"\$(jq -r \".client_email\" \"\$service_account_json_file_path\")\" |
| 119 | + |
| 120 | + local iat |
| 121 | + local exp |
| 122 | + iat=\"\$(date +%s)\" |
| 123 | + exp=\$(( iat + 3600 )) |
| 124 | + |
| 125 | + local header_base64 |
| 126 | + local claim_base64 |
| 127 | + local to_sign |
| 128 | + local signature |
| 129 | + local jwt |
| 130 | + |
| 131 | + header_base64=\"\$( \\ |
| 132 | + echo -n '{\"alg\":\"RS256\",\"typ\":\"JWT\"}' \\ |
| 133 | + | openssl base64 -e \\ |
| 134 | + | tr -d '=\\n' \\ |
| 135 | + | sed 's/+/-/g; s|/|_|g' \\ |
| 136 | + )\" |
| 137 | + |
| 138 | + claim_base64=\"\$( \\ |
| 139 | + echo -n '{\"iss\":\"'\${client_email}'\",\"scope\":\"'\$scopes'\",\"aud\":\"$ACCESS_TOKEN_URI\",\"exp\":'\${exp}',\"iat\":'\${iat}'}' \\ |
| 140 | + | openssl base64 -e \\ |
| 141 | + | tr -d '=\\n' \\ |
| 142 | + | sed 's/+/-/g; s|/|_|g' \\ |
| 143 | + )\" |
| 144 | + |
| 145 | + to_sign=\"\${header_base64}.\${claim_base64}\" |
| 146 | + |
| 147 | + signature=\"\$( \\ |
| 148 | + echo -n \"\$to_sign\" \\ |
| 149 | + | openssl dgst -sha256 -sign <(echo \"\$private_key\") \\ |
| 150 | + | openssl base64 -e \\ |
| 151 | + | tr -d '=\\n' \\ |
| 152 | + | sed 's/+/-/g; s|/|_|g' \\ |
| 153 | + )\" |
| 154 | + |
| 155 | + jwt=\"\${to_sign}.\${signature}\" |
| 156 | + |
| 157 | + local response |
| 158 | + response=\"\$(curl -s -X POST \\ |
| 159 | + -d \"grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer\" \\ |
| 160 | + -d \"assertion=\${jwt}\" \\ |
| 161 | + \"$ACCESS_TOKEN_URI\")\" |
| 162 | + |
| 163 | + local token |
| 164 | + token=\"\$(echo \"\$response\" | jq -r \".access_token\")\" |
| 165 | + |
| 166 | + echo \"\$token\" |
| 167 | + } |
| 168 | + main() { |
| 169 | + trap stop_vm EXIT |
| 170 | + cd $PARENT_FOLDER |
| 171 | + clone_repository |
| 172 | + cd $REPOSITORY_FOLDER_NAME |
| 173 | + restore_from_state_sync |
| 174 | + export_genesis |
| 175 | + tinker_genesis |
| 176 | + remove_extra_files |
| 177 | + remove_all_running_containers |
| 178 | + create_log_files |
| 179 | + start_mainfork_node $FIRST_NODE_DATA_FOLDER_NAME $FIRST_NODE_IP > $FIRST_NODE_LOGS_FILE 2>&1 & |
| 180 | + start_mainfork_node $SECOND_NODE_DATA_FOLDER_NAME $SECOND_NODE_IP > $SECOND_NODE_LOGS_FILE 2>&1 & |
| 181 | + wait_for_some_block_commits $FIRST_NODE_LOGS_FILE |
| 182 | + wait_for_some_block_commits $SECOND_NODE_LOGS_FILE |
| 183 | + remove_all_running_containers |
| 184 | + compress_folders |
| 185 | + upload_file_to_storage $BUCKET_NAME mainfork-snapshots/agoric_$TIMESTAMP.tar.gz state/mainfork_data_$TIMESTAMP.tar.gz |
| 186 | + upload_file_to_storage $BUCKET_NAME mainfork-snapshots/${FIRST_NODE_DATA_FOLDER_NAME}_config_$TIMESTAMP.tar.gz state/mainfork_${FIRST_NODE_DATA_FOLDER_NAME}_config_$TIMESTAMP.tar.gz |
| 187 | + upload_file_to_storage $BUCKET_NAME mainfork-snapshots/${SECOND_NODE_DATA_FOLDER_NAME}_config_$TIMESTAMP.tar.gz state/mainfork_${SECOND_NODE_DATA_FOLDER_NAME}_config_$TIMESTAMP.tar.gz |
| 188 | + upload_file_to_storage $BUCKET_NAME mainfork-snapshots/$VALIDATOR_STATE_FILE_NAME state/$VALIDATOR_STATE_FILE_NAME |
| 189 | + upload_file_to_storage $BUCKET_NAME mainfork-snapshots/keyring-test.tar.gz state/keyring-test.tar.gz |
| 190 | + remove_repository |
| 191 | + } |
| 192 | + remove_all_running_containers() { |
| 193 | + docker container ls --all --format '{{.ID}}' | \ |
| 194 | + xargs -I {} docker container stop {} | \ |
| 195 | + xargs -I {} docker container rm {} --force --volumes |
| 196 | + } |
| 197 | + remove_extra_files() { |
| 198 | + sudo rm --force \ |
| 199 | + state/mainfork/$FIRST_NODE_DATA_FOLDER_NAME/data/agoric/flight-recorder.bin \ |
| 200 | + state/mainfork/$SECOND_NODE_DATA_FOLDER_NAME/data/agoric/flight-recorder.bin |
| 201 | + } |
| 202 | + remove_repository() { |
| 203 | + cd \$HOME |
| 204 | + sudo rm --force --recursive $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME |
| 205 | + } |
| 206 | + restore_from_state_sync() { |
| 207 | + docker run \ |
| 208 | + --entrypoint /scripts/state_sync.sh \ |
| 209 | + --volume $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME/scripts:/scripts:rw \ |
| 210 | + --volume $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME/state/mainnet/agoric:/root/agoric:rw \ |
| 211 | + $IMAGE_NAME |
| 212 | + } |
| 213 | + start_mainfork_node() { |
| 214 | + local node_data_folder_name=\$1 |
| 215 | + local node_ip=\$2 |
| 216 | + |
| 217 | + docker run \ |
| 218 | + --ip \$node_ip \ |
| 219 | + --memory $CONTAINER_MEMORY \ |
| 220 | + --mount 'type=tmpfs,destination=/tmp' \ |
| 221 | + --name \$node_data_folder_name \ |
| 222 | + --network forknet \ |
| 223 | + --volume $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME/state/mainfork:/state:rw \ |
| 224 | + $IMAGE_NAME \ |
| 225 | + start --home /state/\$node_data_folder_name |
| 226 | + } |
| 227 | + stop_vm() { |
| 228 | + local access_token=\"\$(get_access_token \"$VM_OPERATIONS_SCOPES\" \"$VM_ADMIN_SERVICE_ACCOUNT_JSON_FILE_PATH\")\" |
| 229 | + curl \"https://compute.googleapis.com/compute/v1/projects/$PROJECT_NAME/zones/$REGION/instances/$VM_NAME/stop\" \ |
| 230 | + --header \"Authorization: Bearer \$access_token\" \ |
| 231 | + --header \"Content-Type: application/json\" \ |
| 232 | + --output /dev/null \ |
| 233 | + --request POST \ |
| 234 | + --silent |
| 235 | + } |
| 236 | + tinker_genesis() { |
| 237 | + docker run \ |
| 238 | + --entrypoint /tinkerer/scripts/tinkerer.sh \ |
| 239 | + --volume $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME/state/mainfork:/state:rw \ |
| 240 | + --volume $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME/state/mainnet/agoric/export:/export:rw \ |
| 241 | + --volume $PARENT_FOLDER/$REPOSITORY_FOLDER_NAME:/tinkerer:rw \ |
| 242 | + $IMAGE_NAME |
| 243 | + } |
| 244 | + upload_file_to_storage() { |
| 245 | + local access_token=\"\$(get_access_token \"$STORAGE_WRITE_SCOPES\" \"$STORAGE_WRITE_SERVICE_ACCOUNT_JSON_FILE_PATH\")\" |
| 246 | + local bucket_name=\$1 |
| 247 | + local object_name=\$2 |
| 248 | + local file_path=\$3 |
| 249 | + |
| 250 | + echo \"Uploading file \$file_path to bucket \$bucket_name on path \$object_name\" |
| 251 | + |
| 252 | + local http_code=\$( |
| 253 | + curl \"$STORAGE_UPLOAD_URL/\$bucket_name/o?name=\$object_name&uploadType=media\" \ |
| 254 | + --header \"Authorization: Bearer \$access_token\" \ |
| 255 | + --output /dev/null \ |
| 256 | + --request POST \ |
| 257 | + --silent \ |
| 258 | + --upload-file \$file_path \ |
| 259 | + --write-out \"%{http_code}\" |
| 260 | + ) |
| 261 | + |
| 262 | + if [ \"\$http_code\" -ne 200 ] |
| 263 | + then |
| 264 | + echo \"Failed to upload file\" |
| 265 | + exit 1 |
| 266 | + fi |
| 267 | + } |
| 268 | + wait_for_some_block_commits() { |
| 269 | + local log_file_path=\$1 |
| 270 | + tail --lines +1 --follow \$log_file_path | \ |
| 271 | + grep --extended-regexp 'block [0-9]+ commit' --max-count 5 |
| 272 | + } |
| 273 | + |
| 274 | + main > $LOGS_FILE 2>&1 & |
| 275 | +" |
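
End to end, the script drives one snapshot run: it starts the jump-3 VM, clones cosmos-genesis-tinkerer on it, restores mainnet state through state sync, exports and tinkers the genesis, boots the agoric1 and agoric2 fork nodes until a few blocks commit, tars the data, config, and keyring-test folders, uploads the archives to the agoric-snapshots-public bucket, and stops the VM from inside the run via the EXIT trap. The only external knob is IMAGE_TAG, which selects the ghcr.io/agoric/agoric-sdk image; the two service-account key files are expected to already exist under /var/lib/google/code on the VM. A hypothetical invocation (the script file name is a placeholder, not taken from this change):

    # Override the default agoric-sdk image tag ("57") for this run.
    IMAGE_TAG=<tag> ./make-mainfork-snapshot.sh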
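
Nothing from the remote run is streamed back over SSH: main is backgrounded on the VM and logs to /tmp/<timestamp>.logs, while the two fork nodes log to /tmp/agoric1.logs and /tmp/agoric2.logs. A minimal way to watch progress reuses the same gcloud invocation the script itself relies on (the timestamped main log name is generated at launch, so substitute the actual value if you want that file too):

    # Follow the fork-node logs on the VM while a snapshot run is in progress.
    gcloud compute ssh jump-3 \
      --project simulationlab \
      --zone us-central1-a \
      --command "tail --follow /tmp/agoric1.logs /tmp/agoric2.logs"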
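
get_access_token hand-rolls the OAuth 2.0 JWT-bearer flow with openssl and jq so nothing on the VM needs gcloud credentials. When a minted token is rejected, Google's public tokeninfo endpoint is a quick way to see what the token actually carries (scopes, expiry, issuing account). A small helper sketch, not part of the script, assuming jq is available wherever it runs:

    # Print the claims Google associates with an access token produced by get_access_token.
    inspect_access_token() {
      curl --silent "https://www.googleapis.com/oauth2/v3/tokeninfo?access_token=$1" | jq .
    }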
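
On the consuming side, the archives end up as objects under mainfork-snapshots/ in the agoric-snapshots-public bucket. Assuming the bucket really does allow unauthenticated reads, as its name suggests (otherwise gsutil cp with suitable credentials covers the same ground), a fetch looks like:

    # Download and unpack a published data snapshot; <timestamp> is the value from the run.
    curl --fail --location --remote-name \
      "https://storage.googleapis.com/agoric-snapshots-public/mainfork-snapshots/agoric_<timestamp>.tar.gz"
    tar --extract --gzip --file "agoric_<timestamp>.tar.gz"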
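
upload_file_to_storage pushes each tarball in a single uploadType=media POST, so a dropped connection restarts the whole multi-gigabyte transfer. If that becomes a problem, the JSON API's resumable flow is a drop-in alternative that uses the same bucket, object name, and bearer token. The sketch below follows the documented two-step protocol (initiate a session, then PUT the file to the returned session URI); it is an untested illustration under those assumptions, not code from this repository:

    # Resumable variant of upload_file_to_storage (same arguments and token source).
    upload_file_resumable() {
      local access_token="$1" bucket_name="$2" object_name="$3" file_path="$4"

      # Step 1: open an upload session; the session URI comes back in the Location header.
      local session_uri
      session_uri=$(curl --silent --request POST \
        --header "Authorization: Bearer $access_token" \
        --header "Content-Length: 0" \
        --dump-header - --output /dev/null \
        "https://storage.googleapis.com/upload/storage/v1/b/$bucket_name/o?uploadType=resumable&name=$object_name" \
        | tr -d '\r' | awk 'tolower($1) == "location:" { print $2 }')

      # Step 2: stream the file to the session URI (curl defaults to PUT with --upload-file).
      curl --silent --show-error --fail \
        --upload-file "$file_path" \
        "$session_uri"
    }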