|
#!/bin/bash

# Deploys a 3-broker Bitnami Kafka cluster into the "kafka" namespace, joins it
# to the OSM mesh, and installs SMI traffic-access policies for it.
#
# Expects a ".env" file in the working directory. When USE_PRIVATE_REGISTRY=true,
# .env must define CTR_REGISTRY_CREDS_NAME (name of the image-pull secret).

# -a: auto-export every assignment (so values sourced from .env reach child
#     processes like helm/kubectl), -u: error on unset vars, -e: exit on first
#     failure, -o pipefail: a pipeline fails if any stage fails.
set -aueo pipefail

# shellcheck disable=SC1091
source .env
DEPLOY_ON_OPENSHIFT="${DEPLOY_ON_OPENSHIFT:-false}"
USE_PRIVATE_REGISTRY="${USE_PRIVATE_REGISTRY:-false}"
MESH_NAME="${MESH_NAME:-osm}"
| 10 | + |
# Create the kafka namespace, enroll it in the mesh, and enable metrics scraping.
kubectl create ns kafka

bin/osm namespace add --mesh-name "$MESH_NAME" kafka

bin/osm metrics enable --namespace kafka

# Kafka uses an external 3-node ZooKeeper ensemble living in the "zookeeper"
# namespace (deployed separately); build the comma-separated server list once.
zk_servers="kafka-zookeeper-0.kafka-zookeeper-headless.zookeeper.svc.cluster.local"
zk_servers+=",kafka-zookeeper-1.kafka-zookeeper-headless.zookeeper.svc.cluster.local"
zk_servers+=",kafka-zookeeper-2.kafka-zookeeper-headless.zookeeper.svc.cluster.local"

# Install a 3-broker Kafka with its own "kafka" service account (the SMI
# TrafficTargets below reference that service account by name).
helm install kafka bitnami/kafka \
  --namespace kafka \
  --set replicaCount=3 \
  --set zookeeper.enabled=false \
  --set zookeeperChrootPath='/kafka-root' \
  --set serviceAccount.create=true \
  --set serviceAccount.name=kafka \
  --set "externalZookeeper.servers={${zk_servers}}"
| 18 | + |
# On OpenShift, grant the Kafka service account the privileged SCC, and when a
# private container registry is in use, link its pull secret to the account.
if [ "$DEPLOY_ON_OPENSHIFT" = true ] ; then
  oc adm policy add-scc-to-user privileged -z "kafka" -n "kafka"
  if [ "$USE_PRIVATE_REGISTRY" = true ]; then
    # Fail fast with an actionable message instead of the bare "unbound
    # variable" error that set -u would otherwise produce.
    : "${CTR_REGISTRY_CREDS_NAME:?CTR_REGISTRY_CREDS_NAME must be set in .env when USE_PRIVATE_REGISTRY=true}"
    oc secrets link "kafka" "$CTR_REGISTRY_CREDS_NAME" --for=pull -n "kafka"
  fi
fi
| 25 | + |
# Install SMI traffic-access policies so the mesh allows traffic to Kafka:
#  - TCPRoute "kafka":           client traffic on port 9092
#  - TCPRoute "kafka-internal":  ports 9092 and 9093 (broker-to-broker)
#  - TrafficTarget "kafka":          pods under the "default" service account
#                                    may reach the Kafka brokers
#  - TrafficTarget "kafka-internal": brokers (service account "kafka") may
#                                    reach each other
kubectl apply -f - <<EOF
apiVersion: specs.smi-spec.io/v1alpha4
kind: TCPRoute
metadata:
  name: kafka
  namespace: kafka
spec:
  matches:
    ports:
    - 9092
---
apiVersion: specs.smi-spec.io/v1alpha4
kind: TCPRoute
metadata:
  name: kafka-internal
  namespace: kafka
spec:
  matches:
    ports:
    - 9092
    - 9093
---
kind: TrafficTarget
apiVersion: access.smi-spec.io/v1alpha3
metadata:
  name: kafka
  namespace: kafka
spec:
  destination:
    kind: ServiceAccount
    name: kafka
    namespace: kafka
  rules:
  - kind: TCPRoute
    name: kafka
  sources:
  - kind: ServiceAccount
    name: default
    namespace: kafka
---
kind: TrafficTarget
apiVersion: access.smi-spec.io/v1alpha3
metadata:
  name: kafka-internal
  namespace: kafka
spec:
  destination:
    kind: ServiceAccount
    name: kafka
    namespace: kafka
  rules:
  - kind: TCPRoute
    name: kafka-internal
  sources:
  - kind: ServiceAccount
    name: kafka
    namespace: kafka
EOF
| 84 | + |
| 85 | +# Use these commands to test out Kafka |
| 86 | +# |
| 87 | +# Create and exec into a pod with a Kafka image |
| 88 | +# kubectl run --rm -it kafka-client --image docker.io/bitnami/kafka:3.1.0-debian-10-r60 --namespace kafka -- bash |
| 89 | +# Run the Kafka producer command (opens an interactive prompt where each line entered is sent as a Kafka message) |
| 90 | +# You can exit the prompt with Ctrl-C |
| 91 | +# kafka-console-producer.sh --broker-list kafka-0.kafka-headless.kafka.svc.cluster.local:9092 --topic test |
| 92 | +# Now, run the Kafka consumer command (starts a loop to read messages from Kafka) |
| 93 | +# You can exit the prompt with Ctrl-C |
| 94 | +# kafka-console-consumer.sh --bootstrap-server kafka.kafka.svc.cluster.local:9092 --topic test --from-beginning |