Eclipse Hono

Introduction

Eclipse Hono

Architecture

graph LR
  Device -->|http,mqtt,...| Hono(Hono: Tenant & Device & Credential) --> Kafka(Kafka, ksqlDB, Connector) -->|JDBCSinkConnector| MariaDB

Components

Service Ports Comment
MariaDB 3306
Postgres 5432 Optional: JDBC Device Registry
MongoDB 27017 Optional: MongoDB Device Registry
Kafka 9092 Messaging
Kafka Connect 28083 Avro to MariaDB
Schema Registry 28081 Avro Schema Registry
ksqlDB 28088 JSON to Avro conversion
hono-adapter-http 8080 Http Adapter
hono-device-registry 28080 Device Registry & Credentials

Installation

VM: Centos 7

Docker, Docker Compose, K3s

User: iotops

1
2
3
4
5
6
7
useradd iotops
passwd iotops
# Ops789
groupadd wheel
usermod -a -G wheel iotops

su - iotops

Datadir

1
2
3
4
5
mkdir -p /home/data/mariadb
mkdir -p /home/data/postgres
mkdir -p /home/data/www
mkdir -p /home/data/kafka
mkdir -p /home/data/zookeeper

Docker & Docker Compose

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
## docker
sudo yum install -y yum-utils

sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo

sudo yum install docker-ce docker-ce-cli containerd.io

## docker compose
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose

sudo chmod +x /usr/local/bin/docker-compose

sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose

MariaDB

Docker Compose

docker-compose.yml

1
2
3
4
5
6
7
8
9
10
11
12
version: "3.9"  # optional since v1.27.0
services:
mariadb:
image: mariadb
restart: always
container_name: mariadb
ports:
- 3306:3306
volumes:
- /home/data/mariadb:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: root
Initialize
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
sudo docker exec -it mariadb bash

mysql -u root -p

CREATE DATABASE hono_data_db;
CREATE USER honodata@'%' IDENTIFIED BY 'honodata';
GRANT ALL ON hono_data_db.* TO 'honodata'@'%';

use hono_data_db;

CREATE TABLE `hono_telemetry_demo` (
`k1` varchar(50) NOT NULL,
`k2` varchar(50) NOT NULL,
`k3` varchar(50) NOT NULL,
`firstName` varchar(50) NOT NULL,
`lastName` varchar(50) NOT NULL,
`age` varchar(50) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

Mongodb (optional: MongoDB Device Registry)

docker-compose.yml

1
2
3
4
5
6
7
8
9
10
version: '3'
services:
mongo:
image: mongo:4.4.6
ports:
- 27017:27017
container_name: mongodb
restart: always
volumes:
- /home/data/mongodb/data:/data/db
Initial Database
1
2
3
4
5
6
7
8
9
use hono

db.createUser(
{
user: "hono",
pwd: "hono",
roles: [ { role: "dbAdmin", db: "hono" } ]
}
)

Postgres (optional: JDBC Device Registry)

docker-compose.yml

1
2
3
4
5
6
7
8
9
10
11
12
version: '3'
services:
postgresql:
image: postgres:9.6
environment:
POSTGRES_PASSWORD: postgres
ports:
- 5432:5432
container_name: postgresql
restart: always
volumes:
- /home/data/postgres:/var/lib/postgresql/data
Initial Database
1
2
3
4
5
6
7
psql -U postgres
\l

# hono database initial
create database hono;
create user hono with password 'hono';
grant all privileges on database hono to hono;

Kafka

Docker Compose

Upload Connector jars:

1
2
3
4
5
6
7
8
9
10
11
12
cd /home/iotops/kafka/connectors/mysql
# yum install -y wget unzip
#
wget https://cdn.mysql.com//Downloads/Connector-J/mysql-connector-java-8.0.27.tar.gz
tar -zxvf mysql-connector-java-8.0.27.tar.gz
mv mysql-connector-java-8.0.27/mysql-connector-java-8.0.27.jar ./

# https://www.confluent.io/hub/confluentinc/kafka-connect-jdbc
wget https://d1i4a15mxbxib1.cloudfront.net/api/plugins/confluentinc/kafka-connect-jdbc/versions/10.2.6/confluentinc-kafka-connect-jdbc-10.2.6.zip

unzip confluentinc-kafka-connect-jdbc-10.2.6.zip
mv confluentinc-kafka-connect-jdbc-10.2.6/lib/*.* ./

docker-compose.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
version: "3.9"
services:
zookeeper:
image: confluentinc/cp-zookeeper:7.0.0
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
volumes:
- /home/data/zookeeper/data:/var/lib/zookeeper/data
- /home/data/zookeeper/log:/var/lib/zookeeper/log
networks:
- local

kafka:
image: confluentinc/cp-kafka:7.0.0
hostname: kafka
container_name: kafka
ports:
- "9092:9092"
- "29092:29092"
depends_on:
- zookeeper
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: "INTERNAL://kafka:9092,EXTERNAL://10.40.80.38:29092"
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_BROKER_ID: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
volumes:
- /home/data/kafka/data:/var/lib/kafka/data
networks:
- local

schema-registry:
image: confluentinc/cp-schema-registry:7.0.0
hostname: schema-registry
container_name: schema-registry
depends_on:
- kafka
ports:
- "28081:8081"
environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'kafka:9092'
SCHEMA_REGISTRY_LISTENERS: http://schema-registry:8081
networks:
- local

ksqldb-server:
image: confluentinc/ksqldb-server:0.23.1
hostname: ksqldb-server
container_name: ksqldb-server
depends_on:
- kafka
ports:
- "28088:8088"
environment:
KSQL_LISTENERS: http://0.0.0.0:8088
KSQL_BOOTSTRAP_SERVERS: kafka:9092
KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schema-registry:8081
networks:
- local

ksqldb-cli:
image: confluentinc/ksqldb-cli:0.23.1
container_name: ksqldb-cli
depends_on:
- ksqldb-server
entrypoint: /bin/sh
tty: true
networks:
- local

kafka-connect:
image: confluentinc/cp-kafka-connect:7.0.0
hostname: kafka-connect
container_name: kafka-connect
depends_on:
- kafka
ports:
- 28083:8083
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka:9092
CONNECT_REST_PORT: 28083
CONNECT_GROUP_ID: "kc-group-local"
CONNECT_CONFIG_STORAGE_TOPIC: "kc-config"
CONNECT_OFFSET_STORAGE_TOPIC: "kc-offsets"
CONNECT_STATUS_STORAGE_TOPIC: "kc-status"
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_KEY_CONVERTER: "org.apache.kafka.connect.storage.StringConverter"
CONNECT_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_REST_ADVERTISED_HOST_NAME: "10.40.80.38"
CONNECT_PLUGIN_PATH: /usr/share/java,/usr/share/connector
volumes:
- ./connectors/mysql:/usr/share/connector/mysql
networks:
- local

networks:
local:
driver: bridge

Stream

  1. open ksqldb-cli
1
sudo docker exec -it ksqldb-cli ksql http://ksqldb-server:8088
  2. Create data conversion stream
1
2
3
CREATE STREAM S1_1 (firstName VARCHAR, lastName VARCHAR, age INT) WITH (KAFKA_TOPIC='hono.telemetry.MY_TENANT', VALUE_FORMAT='JSON');

CREATE STREAM S1_2 WITH (VALUE_FORMAT='AVRO', KAFKA_TOPIC='hono.telemetry.avro') AS SELECT * FROM S1_1;

Connector

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
curl -X PUT \
http://10.40.80.38:28083/connectors/hono-telemetry-demo/config \
-H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-d '{
"connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
"table.name.format": "hono_telemetry_demo",
"consumer.override.group.id": "1227",
"connection.password": "honodata",
"tasks.max": "1",
"topics": "hono.telemetry.avro",
"batch.size": "3000",
"value.converter.schema.registry.url": "http://schema-registry:8081",
"delete.enabled": "false",
"auto.evolve": "false",
"connection.user": "honodata",
"auto.create": "false",
"connection.url": "jdbc:mysql://10.40.80.38:3306/hono_data_db?verifyServerCertificate=false&useSSL=false&requireSSL=false",
"value.converter": "io.confluent.connect.avro.AvroConverter",
"insert.mode": "upsert",
"pk.mode": "kafka",
"pk.fields": "k1,k2,k3"
}'

Hono

  • k3s
  • helm

K3s

1
curl -sfL https://rancher-mirror.cnrancher.com/k3s/k3s-install.sh | INSTALL_K3S_MIRROR=cn sh -

Helm

  1. Install helm
1
2
tar -zxvf helm-v3.7.2-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
  2. Add chart repository
1
2
3
4
5
6
7
8
9
10
11
12
# helm repo
# https://www.eclipse.org/packages/repository/
helm repo add eclipse-iot https://eclipse.org/packages/charts

# helm list --all-namespaces -a
# helm --kubeconfig /etc/rancher/k3s/k3s.yaml list --all-namespaces -a

k3s kubectl create namespace hono

# helm uninstall eclipse-hono -n hono

k3s kubectl get service -n hono
  3. Configure custom.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
deviceRegistryExample:
#enabled: false
type: jdbc # embedded|mongodb|jdbc
addExampleData: false
mongoDBBasedDeviceRegistry:
mongodb:
host: 10.40.80.38
port: 27017
dbName: hono
username: hono
password: hono
jdbcBasedDeviceRegistry:
applicationProfiles: registry-adapter,registry-management,tenant-service,create-schema,dev
registry:
jdbc:
adapter:
url: jdbc:postgresql://10.40.80.38:5432/hono
driverClass: org.postgresql.Driver
username: hono
password: hono
management:
url: jdbc:postgresql://10.40.80.38:5432/hono
driverClass: org.postgresql.Driver
username: hono
password: hono
tenant:
jdbc:
adapter:
url: jdbc:postgresql://10.40.80.38:5432/hono
driverClass: org.postgresql.Driver
username: hono
password: hono
management:
url: jdbc:postgresql://10.40.80.38:5432/hono
driverClass: org.postgresql.Driver
username: hono
password: hono

adapters:
kafkaMessagingSpec:
commonClientConfig:
bootstrap.servers: 10.40.80.38:29092
amqp:
enabled: false
mqtt:
enabled: false

messagingNetworkTypes:
- kafka

amqpMessagingNetworkExample:
enabled: false

kafkaMessagingClusterExample:
enabled: false
  4. Deploy
1
2
3
sudo chmod 644 /etc/rancher/k3s/k3s.yaml

helm --kubeconfig /etc/rancher/k3s/k3s.yaml install -f ./custom.yaml -n hono eclipse-hono eclipse-iot/hono
  5. Uninstall (Ops)
1
helm --kubeconfig /etc/rancher/k3s/k3s.yaml uninstall eclipse-hono -n hono

Telemetry (Demo)

Mock Device Send Telemetry Data

  1. Create Tenant
1
curl --location --request POST 'http://10.40.80.38:28080/v1/tenants/MY_TENANT'
  2. Create Device
1
curl --location --request POST 'http://10.40.80.38:28080/v1/devices/MY_TENANT/MY_DEVICE'
  3. Set Credentials for Device
1
2
3
4
5
6
7
8
9
curl --location --request PUT 'http://10.40.80.38:28080/v1/credentials/MY_TENANT/MY_DEVICE' \
--header 'Content-Type: application/json' \
-d '[{
"type": "hashed-password",
"auth-id": "MY_DEVICE",
"secrets": [{
"pwd-plain": "my-pwd"
}]
}]'
  4. Send Telemetry

    Basic Authorization 用户名为:MY_DEVICE@MY_TENANT,密码为:my-pwd

1
2
3
4
5
6
7
curl --location --request POST 'http://10.40.80.38:8080/telemetry' \
--header 'Authorization: Basic TVlfREVWSUNFQE1ZX1RFTkFOVDpteS1wd2Q=' \
--header 'Content-Type: application/json' \
-d '{
"firstName": "fn23",
"lastName": "ddd"
}'

Body (Demo):

  • firstName *
  • lastName *
  • age

Command (Demo: HttpAdapter)

  1. Device Send Telemetry/Event With ttd:60

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    curl --location -v --request POST 'http://10.40.80.38:8080/event' \
    > --header 'hono-ttd: 60' \
    > --header 'Authorization: Basic TVlfREVWSUNFQE1ZX1RFTkFOVDpteS1wd2Q=' \
    > --header 'Content-Type: application/json' \
    > -d '{
    > "firstName": "fn23",
    > "lastName": "ddd",
    > "age": 4
    > }'
    * About to connect() to 10.40.80.38 port 8080 (#0)
    * Trying 10.40.80.38...
    * Connected to 10.40.80.38 (10.40.80.38) port 8080 (#0)
    > POST /event HTTP/1.1
    > User-Agent: curl/7.29.0
    > Host: 10.40.80.38:8080
    > Accept: */*
    > hono-ttd: 60
    > Authorization: Basic TVlfREVWSUNFQE1ZX1RFTkFOVDpteS1wd2Q=
    > Content-Type: application/json
    > Content-Length: 64
    >
    * upload completely sent off: 64 out of 64 bytes
    < HTTP/1.1 200 OK
    < vary: origin
    < hono-command: 1
    < hono-cmd-req-id: 203111
    < content-type: application/json
    < content-length: 32
    <
    * Connection #0 to host 10.40.80.38 left intact
    {"col_foo":1, "firstName": "zs"}

    60(可自定义) 秒内执行第2步,则响应码为 200 OK,否则为 202 Accepted

    若执行第2步选择的是需要设备端响应命令执行结果的,则此时响应头会包含 hono-cmd-req-id,此 ID 将作为第 3 步请求的路径参数。

  2. Application Send Command

    Command 分为两种:One-way 与 Command(带响应的命令模式,默认)

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    ## kcat
    ## brew install kcat
    ##
    ## kcat -b 10.40.80.38:29092 -L

    docker run -it --rm --entrypoint /bin/sh edenhill/kcat:1.7.0
    ## docker run -it --rm edenhill/kcat:1.7.0 -b 10.40.80.38:29092 -L

    ## Command One-way
    echo 'MY_DEVICE${"col_foo":1, "firstName": "zs"}'|kcat -b 10.40.80.38:29092 -t hono.command.MY_TENANT -P -K $ -H device_id=MY_DEVICE -H subject=1 -H content-type=application/json

    ## Command With Response
    echo 'MY_DEVICE${"col_foo":1, "firstName": "zs"}'|kcat -b 10.40.80.38:29092 -t hono.command.MY_TENANT -P -K $ -H device_id=MY_DEVICE -H subject=1 -H content-type=application/json -H correlation-id=111 -H response-required=true
  3. Device Send Command Response

    请求路径中:

    • 203111 为第一步从响应头中获得的 hono-cmd-req-id 值。

    • hono-cmd-status 为命令执行后的状态码。

    1
    2
    3
    4
    5
    6
    7
    8
    curl --location --request POST 'http://10.40.80.38:8080/command/res/203111?hono-cmd-status=201' \
    --header 'Authorization: Basic TVlfREVWSUNFQE1ZX1RFTkFOVDpteS1wd2Q=' \
    --header 'Content-Type: application/json' \
    -d '{
    "commandResponse": "res",
    "sss": 1,
    "repeat": "111"
    }'
  4. Application Receive Command Response

    1
    kcat -b 10.40.80.38:29092 -t hono.command_response.MY_TENANT -o beginning -f '%K %h %s'

Done.